Nov 22 07:11:01 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 22 07:11:01 crc restorecon[4694]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 07:11:01 crc restorecon[4694]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:01 crc restorecon[4694]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 07:11:01 crc 
restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 07:11:01 crc restorecon[4694]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 07:11:01 crc restorecon[4694]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 07:11:01 crc 
restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:01 crc restorecon[4694]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 07:11:01 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 07:11:02 crc restorecon[4694]: 
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 07:11:02 crc restorecon[4694]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 07:11:02 crc restorecon[4694]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Nov 22 07:11:03 crc kubenswrapper[4929]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 22 07:11:03 crc kubenswrapper[4929]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 22 07:11:03 crc kubenswrapper[4929]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 22 07:11:03 crc kubenswrapper[4929]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 22 07:11:03 crc kubenswrapper[4929]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 22 07:11:03 crc kubenswrapper[4929]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.561853 4929 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567786 4929 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567806 4929 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567814 4929 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567821 4929 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567826 4929 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567832 4929 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567837 4929 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567851 4929 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567859 4929 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567866 4929 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567872 4929 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567876 4929 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567881 4929 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567886 4929 feature_gate.go:330] unrecognized feature gate: Example
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567891 4929 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567895 4929 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567899 4929 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567904 4929 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567908 4929 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567931 4929 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567938 4929 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567942 4929 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567947 4929 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567951 4929 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567957 4929 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567962 4929 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567966 4929 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567971 4929 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567975 4929 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567980 4929 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567987 4929 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.567993 4929 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568000 4929 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568006 4929 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568012 4929 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568017 4929 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568022 4929 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568027 4929 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568032 4929 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568037 4929 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568043 4929 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568048 4929 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568053 4929 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568058 4929 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568063 4929 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568069 4929 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568074 4929 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568078 4929 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568083 4929 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568087 4929 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568092 4929 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568096 4929 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568101 4929 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568105 4929 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568109 4929 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568114 4929 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568118 4929 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568123 4929 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568127 4929 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568132 4929 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568136 4929 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568141 4929 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568146 4929 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568151 4929 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568155 4929 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568160 4929 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568165 4929 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568169 4929 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568174 4929 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568178 4929 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.568183 4929 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569350 4929 flags.go:64] FLAG: --address="0.0.0.0"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569366 4929 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569378 4929 flags.go:64] FLAG: --anonymous-auth="true"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569386 4929 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569393 4929 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569399 4929 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569407 4929 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569414 4929 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569421 4929 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569429 4929 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569435 4929 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569441 4929 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569446 4929 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569452 4929 flags.go:64] FLAG: --cgroup-root=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569457 4929 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569463 4929 flags.go:64] FLAG: --client-ca-file=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569468 4929 flags.go:64] FLAG: --cloud-config=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569474 4929 flags.go:64] FLAG: --cloud-provider=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569479 4929 flags.go:64] FLAG: --cluster-dns="[]"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569485 4929 flags.go:64] FLAG: --cluster-domain=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569490 4929 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569496 4929 flags.go:64] FLAG: --config-dir=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569501 4929 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569507 4929 flags.go:64] FLAG: --container-log-max-files="5"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569514 4929 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569520 4929 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569525 4929 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569531 4929 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569536 4929 flags.go:64] FLAG: --contention-profiling="false"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569541 4929 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569546 4929 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569552 4929 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569557 4929 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569565 4929 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569570 4929 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569575 4929 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569581 4929 flags.go:64] FLAG: --enable-load-reader="false"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569586 4929 flags.go:64] FLAG: --enable-server="true"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569591 4929 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569599 4929 flags.go:64] FLAG: --event-burst="100"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569604 4929 flags.go:64] FLAG: --event-qps="50"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569611 4929 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569617 4929 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569623 4929 flags.go:64] FLAG: --eviction-hard=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569630 4929 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569636 4929 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569641 4929 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569647 4929 flags.go:64] FLAG: --eviction-soft=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569652 4929 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569657 4929 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569663 4929 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569668 4929 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569673 4929 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569679 4929 flags.go:64] FLAG: --fail-swap-on="true"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569684 4929 flags.go:64] FLAG: --feature-gates=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569690 4929 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569696 4929 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569702 4929 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569707 4929 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569712 4929 flags.go:64] FLAG: --healthz-port="10248"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569718 4929 flags.go:64] FLAG: --help="false"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569723 4929 flags.go:64] FLAG: --hostname-override=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569728 4929 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569734 4929 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569739 4929 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569744 4929 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569749 4929 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569755 4929 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569760 4929 flags.go:64] FLAG: --image-service-endpoint=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569765 4929 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569771 4929 flags.go:64] FLAG: --kube-api-burst="100"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569776 4929 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569782 4929 flags.go:64] FLAG: --kube-api-qps="50"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569788 4929 flags.go:64] FLAG: --kube-reserved=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569794 4929 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569799 4929 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569804 4929 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569809 4929 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569815 4929 flags.go:64] FLAG: --lock-file=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569820 4929 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569826 4929 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569832 4929 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569840 4929 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569845 4929 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569851 4929 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569856 4929 flags.go:64] FLAG: --logging-format="text"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569861 4929 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569867 4929 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569872 4929 flags.go:64] FLAG: --manifest-url=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569877 4929 flags.go:64] FLAG: --manifest-url-header=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569883 4929 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569889 4929 flags.go:64] FLAG: --max-open-files="1000000"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569895 4929 flags.go:64] FLAG: --max-pods="110"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569901 4929 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569906 4929 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569911 4929 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569916 4929 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569922 4929 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569927 4929 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569933 4929 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569946 4929 flags.go:64] FLAG: --node-status-max-images="50"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569952 4929 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569957 4929 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569962 4929 flags.go:64] FLAG: --pod-cidr=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569967 4929 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569977 4929 flags.go:64] FLAG: --pod-manifest-path=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569984 4929 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569989 4929 flags.go:64] FLAG: --pods-per-core="0"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569994 4929 flags.go:64] FLAG: --port="10250"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.569999 4929 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570005 4929 flags.go:64] FLAG: --provider-id=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570010 4929 flags.go:64] FLAG: --qos-reserved=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570015 4929 flags.go:64] FLAG: --read-only-port="10255"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570021 4929 flags.go:64] FLAG: --register-node="true"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570026 4929 flags.go:64] FLAG: --register-schedulable="true"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570032 4929 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570042 4929 flags.go:64] FLAG: --registry-burst="10"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570047 4929 flags.go:64] FLAG: --registry-qps="5"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570052 4929 flags.go:64] FLAG: --reserved-cpus=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570058 4929 flags.go:64] FLAG: --reserved-memory=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570065 4929 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570070 4929 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570076 4929 flags.go:64] FLAG: --rotate-certificates="false"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570082 4929 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570088 4929 flags.go:64] FLAG: --runonce="false"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570094 4929 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570099 4929 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570105 4929 flags.go:64] FLAG: --seccomp-default="false"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570110 4929 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570116 4929 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570121 4929 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570127 4929 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570132 4929 flags.go:64] FLAG: --storage-driver-password="root"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570138 4929 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570143 4929 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570148 4929 flags.go:64] FLAG: --storage-driver-user="root"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570154 4929 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570160 4929 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570166 4929 flags.go:64] FLAG: --system-cgroups=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570171 4929 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570180 4929 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570186 4929 flags.go:64] FLAG: --tls-cert-file=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570191 4929 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570197 4929 flags.go:64] FLAG: --tls-min-version=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570202 4929 flags.go:64] FLAG: --tls-private-key-file=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570207 4929 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570234 4929 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570240 4929 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570245 4929 flags.go:64] FLAG: --v="2"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570252 4929 flags.go:64] FLAG: --version="false"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570259 4929 flags.go:64] FLAG: --vmodule=""
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570265 4929 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570272 4929 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570399 4929 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570408 4929 feature_gate.go:330] unrecognized feature gate: Example
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570415 4929 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570420 4929 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570426 4929 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570431 4929 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570436 4929 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570440 4929 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570445 4929 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570450 4929 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570455 4929 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570460 4929 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570465 4929 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570471 4929 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570476 4929 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570481 4929 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570485 4929 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570490 4929 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570495 4929 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570500 4929 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570505 4929 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570509 4929 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570514 4929 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570520 4929 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570527 4929 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570532 4929 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570537 4929 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570542 4929 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570547 4929 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570552 4929 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570557 4929 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570562 4929 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570567 4929 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570571 4929 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570576 4929 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570583 4929 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570588 4929 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570592 4929 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570596 4929 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570601 4929 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570607 4929 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570613 4929 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570619 4929 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570624 4929 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570628 4929 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570633 4929 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570638 4929 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570643 4929 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570648 4929 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570652 4929 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570657 4929 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570663 4929 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570668 4929 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570674 4929 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570679 4929 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570683 4929 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570688 4929 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570692 4929 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570697 4929 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570701 4929 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570706 4929 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570711 4929 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570715 4929 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570721 4929 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570727 4929 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570733 4929 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570740 4929 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570746 4929 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570750 4929 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570755 4929 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.570759 4929 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.570768 4929 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.584194 4929 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.584269 4929 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584404 4929 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584417 4929 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584426 4929 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584435 4929 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584446 4929 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584454 4929 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584462 4929 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584470 4929 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584477 4929 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584486 4929 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584493 4929 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584501 4929 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584509 4929 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584516 4929 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584524 4929 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584532 4929 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584540 4929 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584549 4929 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584558 4929 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584569 4929 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584603 4929 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584612 4929 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584622 4929 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584634 4929 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584644 4929 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584653 4929 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584662 4929 feature_gate.go:330] unrecognized feature gate: Example
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584672 4929 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584680 4929 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584688 4929 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584696 4929 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584707 4929 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584718 4929 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584728 4929 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584737 4929 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584746 4929 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584753 4929 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584761 4929 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584769 4929 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584777 4929 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584785 4929 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584792 4929 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584800 4929 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584808 4929 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584816 4929 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584824 4929 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584831 4929 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584840 4929 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584848 4929 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584855 4929 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584863 4929 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584871 4929 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584878 4929 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584887 4929 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584895 4929 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584904 4929 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584914 4929 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584926 4929 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584935 4929 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584946 4929 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584954 4929 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584964 4929 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584972 4929 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584981 4929 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584988 4929 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.584996 4929 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585004 4929 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585012 4929 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585020 4929 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585027 4929 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585035 4929 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.585048 4929 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585328 4929 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585344 4929 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585355 4929 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585366 4929 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585375 4929 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585383 4929 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585392 4929 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585399 4929 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585407 4929 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585416 4929 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585423 4929 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585432 4929 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585439 4929 feature_gate.go:330] unrecognized feature gate: Example
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585447 4929 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585455 4929 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585466 4929 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585475 4929 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585483 4929 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585493 4929 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585502 4929 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585511 4929 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585520 4929 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585529 4929 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585537 4929 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585545 4929 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585553 4929 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585561 4929 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585569 4929 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585576 4929 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585584 4929 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585592 4929 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585600 4929 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585608 4929 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585615 4929 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585623 4929 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585631 4929 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585639 4929 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585648 4929 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585655 4929 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585663 4929 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585670 4929 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585679 4929 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585686 4929 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585695 4929 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585703 4929 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585710 4929 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585718 4929 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585726 4929 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585733 4929 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585741 4929 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585749 4929 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585757 4929 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585765 4929 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585773 4929 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585782 4929 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585792 4929 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585801 4929 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585811 4929 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585819 4929 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585827 4929 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585835 4929 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585843 4929 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585851 4929 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585859 4929 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585867 4929 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585875 4929 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585883 4929 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585891 4929 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585899 4929 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585906 4929 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.585914 4929 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.585926 4929 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.586155 4929 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.594331 4929 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.594453 4929 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.598696 4929 server.go:997] "Starting client certificate rotation"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.598722 4929 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.598943 4929 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-05 10:34:40.19723419 +0000 UTC
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.599062 4929 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 1059h23m36.598178869s for next certificate rotation
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.646042 4929 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.647910 4929 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.670002 4929 log.go:25] "Validated CRI v1 runtime API"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.736993 4929 log.go:25] "Validated CRI v1 image API"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.739365 4929 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.751124 4929 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-22-07-05-38-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.751174 4929 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:41 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:42 fsType:tmpfs blockSize:0}]
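
The feature_gate.go:330 warnings above are tolerant by design: the component applies a name=enabled map and only warns on gates it was not compiled with, so a config written for a different component version still loads, while the known gates land in the map printed at feature_gate.go:386. A minimal Go sketch of that shape; the names (knownGates, applyGates) are illustrative assumptions, not the actual kubelet API:

package main

import "fmt"

// knownGates stands in for the gates this binary was built with.
var knownGates = map[string]bool{
	"CloudDualStackNodeIPs": true,
	"NodeSwap":              true,
}

// applyGates mirrors the tolerant behavior seen above: unknown gates
// produce a warning and are skipped; known gates are recorded.
func applyGates(current, requested map[string]bool) {
	for name, enabled := range requested {
		if _, ok := knownGates[name]; !ok {
			fmt.Printf("W feature_gate: unrecognized feature gate: %s\n", name)
			continue
		}
		current[name] = enabled
	}
}

func main() {
	gates := map[string]bool{}
	applyGates(gates, map[string]bool{"NodeSwap": false, "ClusterAPIInstall": true})
	fmt.Printf("feature gates: %v\n", gates) // only NodeSwap lands in the map
}
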
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.773681 4929 manager.go:217] Machine: {Timestamp:2025-11-22 07:11:03.769929211 +0000 UTC m=+0.879383254 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654120448 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:ef0002b7-0f16-47be-ac5a-3e7125d8469f BootID:35e6b744-d50b-4680-8a02-13229aa01a6f Filesystems:[{Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:41 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:42 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827060224 Type:vfs Inodes:1048576 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:d3:77:af Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:d3:77:af Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:8b:73:19 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:bf:b8:14 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:2c:bc:3f Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:8c:c1:90 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:a2:e7:87:a7:cb:63 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:be:94:ca:50:bb:75 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654120448 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.773965 4929 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.774163 4929 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.775618 4929 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.775854 4929 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.775887 4929 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.776147 4929 topology_manager.go:138] "Creating topology manager with none policy"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.776160 4929 container_manager_linux.go:303] "Creating device plugin manager"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.776704 4929 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.776745 4929 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.778040 4929 state_mem.go:36] "Initialized new in-memory state store"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.778444 4929 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.788783 4929 kubelet.go:418] "Attempting to sync node with API server"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.788834 4929 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.788863 4929 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.788878 4929 kubelet.go:324] "Adding apiserver pod source"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.788891 4929 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.793615 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused
Nov 22 07:11:03 crc kubenswrapper[4929]: E1122 07:11:03.793682 4929 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.27:6443: connect: connection refused" logger="UnhandledError"
Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.796043 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused
Nov 22 07:11:03 crc kubenswrapper[4929]: E1122 07:11:03.796119 4929 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.27:6443: connect: connection refused" logger="UnhandledError"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.796074 4929 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.797908 4929 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.801451 4929 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.803078 4929 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.803106 4929 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.803121 4929 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.803129 4929 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.803143 4929 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.803151 4929 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.803159 4929 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.803175 4929 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.803184 4929 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.803192 4929 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.803232 4929 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.803242 4929 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.807682 4929 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
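
The repeated "connection refused" reflector errors against https://api-int.crc.testing:6443 (38.102.83.27) are expected at this point in startup: the kubelet comes up before the API server it talks to, and client-go keeps relisting until the endpoint answers. A generic hedged sketch of that wait-with-backoff pattern, not client-go's actual retry code:

package main

import (
	"fmt"
	"net"
	"time"
)

// waitForAPIServer dials addr until it succeeds, doubling the delay up to
// a cap, roughly the shape of the retry traffic seen in the log above.
func waitForAPIServer(addr string, maxDelay time.Duration) {
	delay := 200 * time.Millisecond // the lease controller below also retries at interval="200ms"
	for {
		conn, err := net.DialTimeout("tcp", addr, time.Second)
		if err == nil {
			conn.Close()
			return
		}
		fmt.Printf("dial %s: %v; retrying in %s\n", addr, err, delay)
		time.Sleep(delay)
		if delay *= 2; delay > maxDelay {
			delay = maxDelay
		}
	}
}

func main() {
	waitForAPIServer("api-int.crc.testing:6443", 5*time.Second)
}
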
server.go:1280] "Started kubelet" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.808357 4929 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.809289 4929 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.809294 4929 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.809807 4929 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Nov 22 07:11:03 crc systemd[1]: Started Kubernetes Kubelet. Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.811186 4929 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.817005 4929 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 00:56:52.316644856 +0000 UTC Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.811199 4929 server.go:460] "Adding debug handlers to kubelet server" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.817101 4929 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 641h45m48.499551653s for next certificate rotation Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.816929 4929 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Nov 22 07:11:03 crc kubenswrapper[4929]: E1122 07:11:03.818749 4929 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.818899 4929 volume_manager.go:287] "The desired_state_of_world populator starts" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.818924 4929 volume_manager.go:289] "Starting Kubelet Volume Manager" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.818965 4929 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Nov 22 07:11:03 crc kubenswrapper[4929]: E1122 07:11:03.819714 4929 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.27:6443: connect: connection refused" interval="200ms" Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.819731 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.819974 4929 factory.go:55] Registering systemd factory Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.820017 4929 factory.go:221] Registration of the systemd container factory successfully Nov 22 07:11:03 crc kubenswrapper[4929]: E1122 07:11:03.820122 4929 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.27:6443: 
connect: connection refused" logger="UnhandledError" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.820706 4929 factory.go:153] Registering CRI-O factory Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.820752 4929 factory.go:221] Registration of the crio container factory successfully Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.820864 4929 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.820930 4929 factory.go:103] Registering Raw factory Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.820956 4929 manager.go:1196] Started watching for new ooms in manager Nov 22 07:11:03 crc kubenswrapper[4929]: E1122 07:11:03.820730 4929 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.27:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187a42a11cbb5222 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-22 07:11:03.808176674 +0000 UTC m=+0.917630687,LastTimestamp:2025-11-22 07:11:03.808176674 +0000 UTC m=+0.917630687,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.822105 4929 manager.go:319] Starting recovery of all containers Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.831909 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832000 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832015 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832028 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832040 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832056 4929 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832072 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832085 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832102 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832114 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832126 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832167 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832183 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832196 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832226 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832243 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832257 4929 reconstruct.go:130] "Volume is marked as uncertain and added into 
the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832269 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832281 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832293 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832306 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832321 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832335 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832352 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832366 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832379 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832395 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832434 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832469 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832503 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832519 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832533 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832547 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832559 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832572 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832584 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832596 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832611 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832623 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832637 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832649 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832661 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832673 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832685 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832700 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832712 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832725 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832741 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832752 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832765 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832778 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832819 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832839 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832852 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832863 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832877 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832890 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832904 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832917 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832931 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832944 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" 
volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832955 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832968 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832981 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.832993 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833005 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833016 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833029 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833040 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833053 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833065 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833079 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833091 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833103 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833116 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833128 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833140 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833151 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833164 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833177 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833190 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833203 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833233 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" 
volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833246 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833256 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833270 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833285 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833298 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833310 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833324 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833336 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833347 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833359 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833373 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833387 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833403 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833416 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833428 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833441 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833455 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833471 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833484 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833497 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833510 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833530 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" 
volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833545 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833559 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833572 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833588 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833602 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833615 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833630 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833644 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833660 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833673 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833686 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" 
volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833699 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833712 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833726 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833740 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833753 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833766 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833780 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833792 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833806 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833818 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833832 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833849 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833862 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833875 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833887 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833899 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833912 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833926 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833940 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833954 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833968 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.833983 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" 
volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834003 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834018 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834030 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834043 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834056 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834070 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834081 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834093 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834106 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834129 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834142 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" 
volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834154 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834166 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834178 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834192 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834221 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834234 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834247 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834260 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834272 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834285 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834299 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" 
volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834313 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834325 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834338 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834351 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834362 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834375 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834389 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834400 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834412 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834424 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834438 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" 
volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834450 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834463 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834474 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834486 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834500 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834514 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834526 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834539 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834552 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834565 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834579 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" 
volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834593 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834605 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834620 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834633 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834649 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834661 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834674 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834688 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834702 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834720 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834733 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" 
volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834746 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834759 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834770 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834783 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834795 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834807 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834821 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834837 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834849 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834863 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834877 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.834891 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.845722 4929 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.846030 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.846052 4929 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.846061 4929 reconstruct.go:97] "Volume reconstruction finished" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.846069 4929 reconciler.go:26] "Reconciler: start to sync state" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.846954 4929 manager.go:324] Recovery completed Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.857337 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.859707 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.859764 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.859775 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.860805 4929 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.860824 4929 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.860848 4929 state_mem.go:36] "Initialized new in-memory state store" Nov 22 07:11:03 crc kubenswrapper[4929]: E1122 07:11:03.919119 4929 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.944021 4929 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.945933 4929 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.945995 4929 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 22 07:11:03 crc kubenswrapper[4929]: I1122 07:11:03.946028 4929 kubelet.go:2335] "Starting kubelet main sync loop" Nov 22 07:11:03 crc kubenswrapper[4929]: E1122 07:11:03.946100 4929 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 22 07:11:03 crc kubenswrapper[4929]: W1122 07:11:03.948158 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:03 crc kubenswrapper[4929]: E1122 07:11:03.948282 4929 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.27:6443: connect: connection refused" logger="UnhandledError" Nov 22 07:11:04 crc kubenswrapper[4929]: E1122 07:11:04.020092 4929 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 22 07:11:04 crc kubenswrapper[4929]: E1122 07:11:04.020705 4929 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.27:6443: connect: connection refused" interval="400ms" Nov 22 07:11:04 crc kubenswrapper[4929]: E1122 07:11:04.046394 4929 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.094457 4929 policy_none.go:49] "None policy: Start" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.095334 4929 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.095360 4929 state_mem.go:35] "Initializing new in-memory state store" Nov 22 07:11:04 crc kubenswrapper[4929]: E1122 07:11:04.120481 4929 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.206474 4929 manager.go:334] "Starting Device Plugin manager" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.206561 4929 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.206576 4929 server.go:79] "Starting device plugin registration server" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.207062 4929 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.207083 4929 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.207251 4929 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.207396 4929 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 
07:11:04.207410 4929 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 22 07:11:04 crc kubenswrapper[4929]: E1122 07:11:04.216928 4929 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.247542 4929 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.247660 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.249071 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.249159 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.249177 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.249602 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.249583 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.249647 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.250974 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.251001 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.251012 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.251116 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.251320 4929 util.go:30] "No sandbox for pod can be found. 
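[analysis note] The records above use the klog text format: a severity letter plus MMDD date (I1122, E1122, W1122), a timestamp, the emitting PID, a source file:line, a quoted message, then key="value" pairs. Below is a minimal Go sketch for tallying such records by severity and source location when triaging a capture like this one; it is not part of the kubelet, and the input assumptions (one record per line on stdin, the header shape shown above) are mine, not guaranteed by this artifact.

// klogscan.go - tally klog-style records by severity and source location.
// A sketch for log triage, assuming one record per line on stdin.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"sort"
)

// Matches the klog header seen above: severity letter, MMDD date,
// wall-clock time, PID, and "file.go:line]" source location.
var klogHeader = regexp.MustCompile(`([IWE])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d+)\s+(\d+)\s+([\w./-]+:\d+)\]`)

func main() {
	counts := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // journal lines can be very long
	for sc.Scan() {
		if m := klogHeader.FindStringSubmatch(sc.Text()); m != nil {
			counts[m[1]+" "+m[5]]++ // e.g. "I reconstruct.go:130"
		}
	}
	keys := make([]string, 0, len(counts))
	for k := range counts {
		keys = append(keys, k)
	}
	sort.Slice(keys, func(i, j int) bool { return counts[keys[i]] > counts[keys[j]] })
	for _, k := range keys {
		fmt.Printf("%6d  %s\n", counts[k], k)
	}
}

Run against this file (for example, journalctl output piped to the program), the dominant buckets would be "I reconstruct.go:130" for the uncertain-volume dump and "I kubelet_node_status.go:724" for the repeated node-condition events.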
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.251376 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.251708 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.251735 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.251747 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.251851 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.252015 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.252060 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.252170 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.252195 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.252220 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.252514 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.252543 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.252554 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.252694 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.252786 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.252819 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.252877 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.252914 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.252924 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.253294 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.253330 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.253344 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.253491 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.253506 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.253522 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.253529 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.253536 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.254158 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.254183 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.254193 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.257838 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.257880 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.257892 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.307700 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.308804 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.308838 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.308849 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.308872 4929 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: E1122 07:11:04.309441 4929 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.27:6443: connect: connection refused" node="crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.350246 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.350304 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.350335 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.350354 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.350375 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.350411 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.350437 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.350453 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.350521 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.350570 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.350595 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.350614 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.350630 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.350645 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.350661 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 22 07:11:04 crc kubenswrapper[4929]: E1122 07:11:04.422296 4929 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.27:6443: connect: connection refused" interval="800ms"
Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.451461 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
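[analysis note] The two lease-controller failures above show the retry interval doubling from 400ms (at 07:11:04.020705) to 800ms (at 07:11:04.422296) while the API endpoint still refuses connections. A minimal Go sketch of that doubling pattern follows; only the two intervals are evidenced by this capture, so the cap, attempt count, and the simulated failure are assumptions for illustration, not the kubelet's actual policy.

// backoff.go - sketch of the doubling retry interval observed above.
package main

import (
	"errors"
	"fmt"
	"time"
)

// ensureLease stands in for the kubelet's lease update; in the log it keeps
// failing while api-int.crc.testing:6443 refuses connections.
func ensureLease() error {
	return errors.New("dial tcp 38.102.83.27:6443: connect: connection refused")
}

func main() {
	interval := 400 * time.Millisecond // first retry interval seen in the log
	maxInterval := 7 * time.Second     // assumed cap; not shown in this capture
	for attempt := 1; attempt <= 5; attempt++ {
		if err := ensureLease(); err == nil {
			fmt.Println("lease ensured")
			return
		} else {
			fmt.Printf("attempt %d: %v; will retry in %s\n", attempt, err, interval)
		}
		time.Sleep(interval)
		interval *= 2 // 400ms -> 800ms matches the two intervals logged above
		if interval > maxInterval {
			interval = maxInterval
		}
	}
	fmt.Println("still failing; a real client would keep retrying at the cap")
}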
pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.451537 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.451570 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.451602 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.451621 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.451636 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.451701 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.451738 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.451729 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.451804 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 
07:11:04.451715 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.451910 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.451942 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.452008 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.452014 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.452057 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.452071 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.452091 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.452120 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.452134 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.452164 4929 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.452327 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.451736 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.452260 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.452424 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.452440 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.452253 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.452522 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.452563 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.452662 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 
07:11:04.510419 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.512428 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.512481 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.512504 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.512532 4929 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 07:11:04 crc kubenswrapper[4929]: E1122 07:11:04.513128 4929 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.27:6443: connect: connection refused" node="crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.594195 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.601533 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.631190 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.637410 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.641662 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 07:11:04 crc kubenswrapper[4929]: W1122 07:11:04.697069 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:04 crc kubenswrapper[4929]: E1122 07:11:04.697154 4929 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.27:6443: connect: connection refused" logger="UnhandledError" Nov 22 07:11:04 crc kubenswrapper[4929]: W1122 07:11:04.728552 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-0af36d3e835dccffe86bfa6c4dc5779bf12aaa0f5e1ac2e03e63cf275bfbf0da WatchSource:0}: Error finding container 0af36d3e835dccffe86bfa6c4dc5779bf12aaa0f5e1ac2e03e63cf275bfbf0da: Status 404 returned error can't find the container with id 0af36d3e835dccffe86bfa6c4dc5779bf12aaa0f5e1ac2e03e63cf275bfbf0da Nov 22 07:11:04 crc kubenswrapper[4929]: W1122 07:11:04.730610 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-f0de58dab6460665780a0a9068064222f8567048b5afdac56bd55550daf77800 WatchSource:0}: Error finding container f0de58dab6460665780a0a9068064222f8567048b5afdac56bd55550daf77800: Status 404 returned error can't find the container with id f0de58dab6460665780a0a9068064222f8567048b5afdac56bd55550daf77800 Nov 22 07:11:04 crc kubenswrapper[4929]: W1122 07:11:04.732539 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-ef7427fa78f7e7e1d108aac723a1e0110e7785bddcd71635b2b9b2c085100aaf WatchSource:0}: Error finding container ef7427fa78f7e7e1d108aac723a1e0110e7785bddcd71635b2b9b2c085100aaf: Status 404 returned error can't find the container with id ef7427fa78f7e7e1d108aac723a1e0110e7785bddcd71635b2b9b2c085100aaf Nov 22 07:11:04 crc kubenswrapper[4929]: W1122 07:11:04.733415 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-65819c0537cd5c2c5b7122faa1bd818f698e7f40156b622a3782ac79ac9a27a1 WatchSource:0}: Error finding container 65819c0537cd5c2c5b7122faa1bd818f698e7f40156b622a3782ac79ac9a27a1: Status 404 returned error can't find the container with id 65819c0537cd5c2c5b7122faa1bd818f698e7f40156b622a3782ac79ac9a27a1 Nov 22 07:11:04 crc kubenswrapper[4929]: W1122 07:11:04.735983 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-672c2fb365a7527e0a096a3b0fc18020035a1ba25b39401848af0034893343a0 WatchSource:0}: Error finding container 672c2fb365a7527e0a096a3b0fc18020035a1ba25b39401848af0034893343a0: Status 404 returned error can't find the container with id 672c2fb365a7527e0a096a3b0fc18020035a1ba25b39401848af0034893343a0 Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.809244 4929 
csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.913935 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.915440 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.915522 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.915566 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.915613 4929 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 07:11:04 crc kubenswrapper[4929]: E1122 07:11:04.916392 4929 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.27:6443: connect: connection refused" node="crc" Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.950482 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f0de58dab6460665780a0a9068064222f8567048b5afdac56bd55550daf77800"} Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.951646 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"0af36d3e835dccffe86bfa6c4dc5779bf12aaa0f5e1ac2e03e63cf275bfbf0da"} Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.954249 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"672c2fb365a7527e0a096a3b0fc18020035a1ba25b39401848af0034893343a0"} Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.956632 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"65819c0537cd5c2c5b7122faa1bd818f698e7f40156b622a3782ac79ac9a27a1"} Nov 22 07:11:04 crc kubenswrapper[4929]: I1122 07:11:04.957504 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"ef7427fa78f7e7e1d108aac723a1e0110e7785bddcd71635b2b9b2c085100aaf"} Nov 22 07:11:04 crc kubenswrapper[4929]: W1122 07:11:04.960314 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:04 crc kubenswrapper[4929]: E1122 07:11:04.960382 4929 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get 
\"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.27:6443: connect: connection refused" logger="UnhandledError" Nov 22 07:11:05 crc kubenswrapper[4929]: W1122 07:11:05.144634 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:05 crc kubenswrapper[4929]: E1122 07:11:05.144736 4929 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.27:6443: connect: connection refused" logger="UnhandledError" Nov 22 07:11:05 crc kubenswrapper[4929]: W1122 07:11:05.172664 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:05 crc kubenswrapper[4929]: E1122 07:11:05.172764 4929 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.27:6443: connect: connection refused" logger="UnhandledError" Nov 22 07:11:05 crc kubenswrapper[4929]: E1122 07:11:05.224307 4929 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.27:6443: connect: connection refused" interval="1.6s" Nov 22 07:11:05 crc kubenswrapper[4929]: I1122 07:11:05.716806 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:05 crc kubenswrapper[4929]: I1122 07:11:05.718787 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:05 crc kubenswrapper[4929]: I1122 07:11:05.718856 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:05 crc kubenswrapper[4929]: I1122 07:11:05.718880 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:05 crc kubenswrapper[4929]: I1122 07:11:05.718919 4929 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 07:11:05 crc kubenswrapper[4929]: E1122 07:11:05.719587 4929 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.27:6443: connect: connection refused" node="crc" Nov 22 07:11:05 crc kubenswrapper[4929]: I1122 07:11:05.809553 4929 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:06 crc kubenswrapper[4929]: E1122 07:11:06.790964 4929 event.go:368] "Unable to write event (may retry after sleeping)" err="Post 
\"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.27:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187a42a11cbb5222 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-22 07:11:03.808176674 +0000 UTC m=+0.917630687,LastTimestamp:2025-11-22 07:11:03.808176674 +0000 UTC m=+0.917630687,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 22 07:11:06 crc kubenswrapper[4929]: I1122 07:11:06.809799 4929 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:06 crc kubenswrapper[4929]: E1122 07:11:06.824936 4929 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.27:6443: connect: connection refused" interval="3.2s" Nov 22 07:11:06 crc kubenswrapper[4929]: W1122 07:11:06.978997 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:06 crc kubenswrapper[4929]: E1122 07:11:06.979048 4929 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.27:6443: connect: connection refused" logger="UnhandledError" Nov 22 07:11:07 crc kubenswrapper[4929]: I1122 07:11:07.319787 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:07 crc kubenswrapper[4929]: I1122 07:11:07.322306 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:07 crc kubenswrapper[4929]: I1122 07:11:07.322371 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:07 crc kubenswrapper[4929]: I1122 07:11:07.322388 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:07 crc kubenswrapper[4929]: I1122 07:11:07.322457 4929 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 07:11:07 crc kubenswrapper[4929]: E1122 07:11:07.323785 4929 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.27:6443: connect: connection refused" node="crc" Nov 22 07:11:07 crc kubenswrapper[4929]: W1122 07:11:07.411877 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:07 crc kubenswrapper[4929]: E1122 
07:11:07.411981 4929 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.27:6443: connect: connection refused" logger="UnhandledError" Nov 22 07:11:07 crc kubenswrapper[4929]: W1122 07:11:07.498335 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:07 crc kubenswrapper[4929]: E1122 07:11:07.498462 4929 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.27:6443: connect: connection refused" logger="UnhandledError" Nov 22 07:11:07 crc kubenswrapper[4929]: I1122 07:11:07.810041 4929 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:07 crc kubenswrapper[4929]: W1122 07:11:07.856534 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:07 crc kubenswrapper[4929]: E1122 07:11:07.856678 4929 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.27:6443: connect: connection refused" logger="UnhandledError" Nov 22 07:11:08 crc kubenswrapper[4929]: I1122 07:11:08.809835 4929 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:09 crc kubenswrapper[4929]: I1122 07:11:09.810060 4929 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:10 crc kubenswrapper[4929]: E1122 07:11:10.025992 4929 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.27:6443: connect: connection refused" interval="6.4s" Nov 22 07:11:10 crc kubenswrapper[4929]: I1122 07:11:10.524769 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:10 crc kubenswrapper[4929]: I1122 07:11:10.526567 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:10 crc kubenswrapper[4929]: I1122 07:11:10.526620 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:11:10 crc kubenswrapper[4929]: I1122 07:11:10.526633 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:10 crc kubenswrapper[4929]: I1122 07:11:10.526661 4929 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 07:11:10 crc kubenswrapper[4929]: E1122 07:11:10.527257 4929 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.27:6443: connect: connection refused" node="crc" Nov 22 07:11:10 crc kubenswrapper[4929]: I1122 07:11:10.810378 4929 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:11 crc kubenswrapper[4929]: W1122 07:11:11.719888 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:11 crc kubenswrapper[4929]: E1122 07:11:11.720156 4929 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.27:6443: connect: connection refused" logger="UnhandledError" Nov 22 07:11:11 crc kubenswrapper[4929]: W1122 07:11:11.747283 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:11 crc kubenswrapper[4929]: E1122 07:11:11.747359 4929 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.27:6443: connect: connection refused" logger="UnhandledError" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.810315 4929 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.974106 4929 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec" exitCode=0 Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.974192 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec"} Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.974197 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.975561 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.975598 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.975610 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.975981 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc"} Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.978524 4929 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551" exitCode=0 Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.978681 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551"} Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.978841 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.980677 4929 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57" exitCode=0 Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.980719 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57"} Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.981356 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.983708 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.983753 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.983782 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.983710 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.983834 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.984631 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.985951 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.986669 4929 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" 
containerID="59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165" exitCode=0 Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.986714 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165"} Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.986825 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.987129 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.987172 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.987192 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.987842 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.987888 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:11 crc kubenswrapper[4929]: I1122 07:11:11.987909 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:12 crc kubenswrapper[4929]: I1122 07:11:12.809913 4929 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:13 crc kubenswrapper[4929]: W1122 07:11:13.156471 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:13 crc kubenswrapper[4929]: E1122 07:11:13.156635 4929 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.27:6443: connect: connection refused" logger="UnhandledError" Nov 22 07:11:13 crc kubenswrapper[4929]: W1122 07:11:13.470424 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:13 crc kubenswrapper[4929]: E1122 07:11:13.470518 4929 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.27:6443: connect: connection refused" logger="UnhandledError" Nov 22 07:11:13 crc kubenswrapper[4929]: I1122 07:11:13.808992 4929 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:13 crc kubenswrapper[4929]: I1122 07:11:13.995765 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2"} Nov 22 07:11:13 crc kubenswrapper[4929]: I1122 07:11:13.999804 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1"} Nov 22 07:11:14 crc kubenswrapper[4929]: I1122 07:11:14.004490 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf"} Nov 22 07:11:14 crc kubenswrapper[4929]: I1122 07:11:14.004893 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:14 crc kubenswrapper[4929]: I1122 07:11:14.006318 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:14 crc kubenswrapper[4929]: I1122 07:11:14.006358 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:14 crc kubenswrapper[4929]: I1122 07:11:14.006370 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:14 crc kubenswrapper[4929]: I1122 07:11:14.008353 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b"} Nov 22 07:11:14 crc kubenswrapper[4929]: I1122 07:11:14.010181 4929 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756" exitCode=0 Nov 22 07:11:14 crc kubenswrapper[4929]: I1122 07:11:14.010235 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756"} Nov 22 07:11:14 crc kubenswrapper[4929]: I1122 07:11:14.010293 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:14 crc kubenswrapper[4929]: I1122 07:11:14.011238 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:14 crc kubenswrapper[4929]: I1122 07:11:14.011289 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:14 crc kubenswrapper[4929]: I1122 07:11:14.011306 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:14 crc kubenswrapper[4929]: E1122 07:11:14.217053 4929 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" 
Nov 22 07:11:14 crc kubenswrapper[4929]: I1122 07:11:14.809169 4929 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:15 crc kubenswrapper[4929]: I1122 07:11:15.014559 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7"} Nov 22 07:11:15 crc kubenswrapper[4929]: I1122 07:11:15.017196 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5"} Nov 22 07:11:15 crc kubenswrapper[4929]: I1122 07:11:15.019369 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376"} Nov 22 07:11:15 crc kubenswrapper[4929]: I1122 07:11:15.020233 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:15 crc kubenswrapper[4929]: I1122 07:11:15.021139 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:15 crc kubenswrapper[4929]: I1122 07:11:15.021205 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:15 crc kubenswrapper[4929]: I1122 07:11:15.021252 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:15 crc kubenswrapper[4929]: I1122 07:11:15.809459 4929 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:16 crc kubenswrapper[4929]: I1122 07:11:16.026783 4929 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26" exitCode=0 Nov 22 07:11:16 crc kubenswrapper[4929]: I1122 07:11:16.026849 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26"} Nov 22 07:11:16 crc kubenswrapper[4929]: E1122 07:11:16.427019 4929 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.27:6443: connect: connection refused" interval="7s" Nov 22 07:11:16 crc kubenswrapper[4929]: E1122 07:11:16.792722 4929 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.27:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187a42a11cbb5222 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-22 07:11:03.808176674 +0000 UTC m=+0.917630687,LastTimestamp:2025-11-22 07:11:03.808176674 +0000 UTC m=+0.917630687,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 22 07:11:16 crc kubenswrapper[4929]: I1122 07:11:16.809480 4929 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:16 crc kubenswrapper[4929]: I1122 07:11:16.927778 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:16 crc kubenswrapper[4929]: I1122 07:11:16.928911 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:16 crc kubenswrapper[4929]: I1122 07:11:16.929012 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:16 crc kubenswrapper[4929]: I1122 07:11:16.929028 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:16 crc kubenswrapper[4929]: I1122 07:11:16.929052 4929 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 07:11:16 crc kubenswrapper[4929]: E1122 07:11:16.929514 4929 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.27:6443: connect: connection refused" node="crc" Nov 22 07:11:17 crc kubenswrapper[4929]: I1122 07:11:17.037595 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df"} Nov 22 07:11:17 crc kubenswrapper[4929]: I1122 07:11:17.041353 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923"} Nov 22 07:11:17 crc kubenswrapper[4929]: I1122 07:11:17.041415 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:17 crc kubenswrapper[4929]: I1122 07:11:17.042592 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:17 crc kubenswrapper[4929]: I1122 07:11:17.042644 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:17 crc kubenswrapper[4929]: I1122 07:11:17.042662 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:17 crc kubenswrapper[4929]: I1122 07:11:17.044888 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd"} Nov 22 07:11:17 crc 
kubenswrapper[4929]: I1122 07:11:17.044970 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:17 crc kubenswrapper[4929]: I1122 07:11:17.044982 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:17 crc kubenswrapper[4929]: I1122 07:11:17.046265 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:17 crc kubenswrapper[4929]: I1122 07:11:17.046299 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:17 crc kubenswrapper[4929]: I1122 07:11:17.046318 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:17 crc kubenswrapper[4929]: I1122 07:11:17.046340 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:17 crc kubenswrapper[4929]: I1122 07:11:17.046325 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:17 crc kubenswrapper[4929]: I1122 07:11:17.046423 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:17 crc kubenswrapper[4929]: I1122 07:11:17.290844 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 07:11:17 crc kubenswrapper[4929]: I1122 07:11:17.809456 4929 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:18 crc kubenswrapper[4929]: I1122 07:11:18.051434 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657"} Nov 22 07:11:18 crc kubenswrapper[4929]: I1122 07:11:18.055568 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a"} Nov 22 07:11:18 crc kubenswrapper[4929]: I1122 07:11:18.055643 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:18 crc kubenswrapper[4929]: I1122 07:11:18.055746 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 07:11:18 crc kubenswrapper[4929]: I1122 07:11:18.055775 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:18 crc kubenswrapper[4929]: I1122 07:11:18.056932 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:18 crc kubenswrapper[4929]: I1122 07:11:18.056973 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:18 crc kubenswrapper[4929]: I1122 07:11:18.056990 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:18 crc kubenswrapper[4929]: I1122 
07:11:18.057010 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:18 crc kubenswrapper[4929]: I1122 07:11:18.057041 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:18 crc kubenswrapper[4929]: I1122 07:11:18.057060 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:18 crc kubenswrapper[4929]: I1122 07:11:18.274153 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 07:11:18 crc kubenswrapper[4929]: I1122 07:11:18.809081 4929 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:19 crc kubenswrapper[4929]: I1122 07:11:19.062108 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"325a661250c6fb0019e2e8209bdeee0ff6a3e62416c31845f4e02887c7f0effa"} Nov 22 07:11:19 crc kubenswrapper[4929]: I1122 07:11:19.062185 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:19 crc kubenswrapper[4929]: I1122 07:11:19.062235 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 07:11:19 crc kubenswrapper[4929]: I1122 07:11:19.063565 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:19 crc kubenswrapper[4929]: I1122 07:11:19.063615 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:19 crc kubenswrapper[4929]: I1122 07:11:19.063633 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:19 crc kubenswrapper[4929]: I1122 07:11:19.063691 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:19 crc kubenswrapper[4929]: I1122 07:11:19.063756 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:19 crc kubenswrapper[4929]: I1122 07:11:19.063769 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:19 crc kubenswrapper[4929]: I1122 07:11:19.810586 4929 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:19 crc kubenswrapper[4929]: I1122 07:11:19.820899 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 07:11:19 crc kubenswrapper[4929]: W1122 07:11:19.865617 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused Nov 22 07:11:19 crc kubenswrapper[4929]: E1122 07:11:19.865797 4929 
reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.27:6443: connect: connection refused" logger="UnhandledError"
Nov 22 07:11:19 crc kubenswrapper[4929]: I1122 07:11:19.920175 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 22 07:11:20 crc kubenswrapper[4929]: I1122 07:11:20.064334 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:20 crc kubenswrapper[4929]: I1122 07:11:20.064335 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:20 crc kubenswrapper[4929]: I1122 07:11:20.065722 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:20 crc kubenswrapper[4929]: I1122 07:11:20.065757 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:20 crc kubenswrapper[4929]: I1122 07:11:20.065794 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:20 crc kubenswrapper[4929]: I1122 07:11:20.065819 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:20 crc kubenswrapper[4929]: I1122 07:11:20.065793 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:20 crc kubenswrapper[4929]: I1122 07:11:20.065915 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:20 crc kubenswrapper[4929]: I1122 07:11:20.555377 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 22 07:11:20 crc kubenswrapper[4929]: I1122 07:11:20.809365 4929 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused
Nov 22 07:11:21 crc kubenswrapper[4929]: I1122 07:11:21.070586 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb"}
Nov 22 07:11:21 crc kubenswrapper[4929]: I1122 07:11:21.072729 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 22 07:11:21 crc kubenswrapper[4929]: I1122 07:11:21.075067 4929 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="325a661250c6fb0019e2e8209bdeee0ff6a3e62416c31845f4e02887c7f0effa" exitCode=255
Nov 22 07:11:21 crc kubenswrapper[4929]: I1122 07:11:21.075140 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"325a661250c6fb0019e2e8209bdeee0ff6a3e62416c31845f4e02887c7f0effa"}
Nov 22 07:11:21 crc kubenswrapper[4929]: I1122 07:11:21.075177 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:21 crc kubenswrapper[4929]: I1122 07:11:21.075275 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:21 crc kubenswrapper[4929]: I1122 07:11:21.076129 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:21 crc kubenswrapper[4929]: I1122 07:11:21.076162 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:21 crc kubenswrapper[4929]: I1122 07:11:21.076171 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:21 crc kubenswrapper[4929]: I1122 07:11:21.076478 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:21 crc kubenswrapper[4929]: I1122 07:11:21.076520 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:21 crc kubenswrapper[4929]: I1122 07:11:21.076534 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:21 crc kubenswrapper[4929]: I1122 07:11:21.077164 4929 scope.go:117] "RemoveContainer" containerID="325a661250c6fb0019e2e8209bdeee0ff6a3e62416c31845f4e02887c7f0effa"
Nov 22 07:11:21 crc kubenswrapper[4929]: I1122 07:11:21.079944 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 22 07:11:21 crc kubenswrapper[4929]: W1122 07:11:21.080610 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused
Nov 22 07:11:21 crc kubenswrapper[4929]: E1122 07:11:21.080706 4929 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.27:6443: connect: connection refused" logger="UnhandledError"
Nov 22 07:11:21 crc kubenswrapper[4929]: I1122 07:11:21.809672 4929 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.27:6443: connect: connection refused
Nov 22 07:11:22 crc kubenswrapper[4929]: I1122 07:11:22.035020 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 22 07:11:22 crc kubenswrapper[4929]: I1122 07:11:22.083053 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 22 07:11:22 crc kubenswrapper[4929]: I1122 07:11:22.085257 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8"}
Nov 22 07:11:22 crc kubenswrapper[4929]: I1122 07:11:22.085359 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:22 crc kubenswrapper[4929]: I1122 07:11:22.085419 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 22 07:11:22 crc kubenswrapper[4929]: I1122 07:11:22.086980 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:22 crc kubenswrapper[4929]: I1122 07:11:22.087016 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:22 crc kubenswrapper[4929]: I1122 07:11:22.087028 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:22 crc kubenswrapper[4929]: I1122 07:11:22.090319 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b"}
Nov 22 07:11:22 crc kubenswrapper[4929]: I1122 07:11:22.090351 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d"}
Nov 22 07:11:22 crc kubenswrapper[4929]: I1122 07:11:22.090387 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:22 crc kubenswrapper[4929]: I1122 07:11:22.091167 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:22 crc kubenswrapper[4929]: I1122 07:11:22.091198 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:22 crc kubenswrapper[4929]: I1122 07:11:22.091230 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:22 crc kubenswrapper[4929]: I1122 07:11:22.264478 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 22 07:11:23 crc kubenswrapper[4929]: I1122 07:11:23.097540 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054"}
Nov 22 07:11:23 crc kubenswrapper[4929]: I1122 07:11:23.097626 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:23 crc kubenswrapper[4929]: I1122 07:11:23.097647 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:23 crc kubenswrapper[4929]: I1122 07:11:23.097724 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 22 07:11:23 crc kubenswrapper[4929]: I1122 07:11:23.097765 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:23 crc kubenswrapper[4929]: I1122 07:11:23.099069 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:23 crc kubenswrapper[4929]: I1122 07:11:23.099111 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:23 crc kubenswrapper[4929]: I1122 07:11:23.099118 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:23 crc kubenswrapper[4929]: I1122 07:11:23.099134 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:23 crc kubenswrapper[4929]: I1122 07:11:23.099135 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:23 crc kubenswrapper[4929]: I1122 07:11:23.099244 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:23 crc kubenswrapper[4929]: I1122 07:11:23.099266 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:23 crc kubenswrapper[4929]: I1122 07:11:23.099161 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:23 crc kubenswrapper[4929]: I1122 07:11:23.099393 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:23 crc kubenswrapper[4929]: I1122 07:11:23.929671 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:23 crc kubenswrapper[4929]: I1122 07:11:23.931045 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:23 crc kubenswrapper[4929]: I1122 07:11:23.931116 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:23 crc kubenswrapper[4929]: I1122 07:11:23.931134 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:23 crc kubenswrapper[4929]: I1122 07:11:23.931201 4929 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 22 07:11:24 crc kubenswrapper[4929]: I1122 07:11:24.100390 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:24 crc kubenswrapper[4929]: I1122 07:11:24.100452 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:24 crc kubenswrapper[4929]: I1122 07:11:24.101506 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:24 crc kubenswrapper[4929]: I1122 07:11:24.101570 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:24 crc kubenswrapper[4929]: I1122 07:11:24.101594 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:24 crc kubenswrapper[4929]: I1122 07:11:24.101885 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:24 crc kubenswrapper[4929]: I1122 07:11:24.101921 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:24 crc kubenswrapper[4929]: I1122 07:11:24.101932 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:24 crc kubenswrapper[4929]: E1122 07:11:24.217176 4929 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 22 07:11:25 crc kubenswrapper[4929]: I1122 07:11:25.035181 4929 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 22 07:11:25 crc kubenswrapper[4929]: I1122 07:11:25.035371 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 22 07:11:25 crc kubenswrapper[4929]: I1122 07:11:25.785462 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Nov 22 07:11:25 crc kubenswrapper[4929]: I1122 07:11:25.785725 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:25 crc kubenswrapper[4929]: I1122 07:11:25.787165 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:25 crc kubenswrapper[4929]: I1122 07:11:25.787204 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:25 crc kubenswrapper[4929]: I1122 07:11:25.787230 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:31 crc kubenswrapper[4929]: I1122 07:11:31.404363 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Nov 22 07:11:31 crc kubenswrapper[4929]: I1122 07:11:31.404532 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:31 crc kubenswrapper[4929]: I1122 07:11:31.405536 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:31 crc kubenswrapper[4929]: I1122 07:11:31.405596 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:31 crc kubenswrapper[4929]: I1122 07:11:31.405607 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:32 crc kubenswrapper[4929]: I1122 07:11:32.265178 4929 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="Get \"https://192.168.126.11:6443/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 22 07:11:32 crc kubenswrapper[4929]: I1122 07:11:32.265307 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 22 07:11:32 crc kubenswrapper[4929]: I1122 07:11:32.405304 4929 patch_prober.go:28] interesting pod/etcd-crc container/etcd namespace/openshift-etcd: Startup probe status=failure output="Get \"https://192.168.126.11:9980/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 22 07:11:32 crc kubenswrapper[4929]: I1122 07:11:32.405578 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-etcd/etcd-crc" podUID="2139d3e2895fc6797b9c76a1b4c9886d" containerName="etcd" probeResult="failure" output="Get \"https://192.168.126.11:9980/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 22 07:11:32 crc kubenswrapper[4929]: I1122 07:11:32.810746 4929 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout
Nov 22 07:11:33 crc kubenswrapper[4929]: E1122 07:11:33.428304 4929 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" interval="7s"
Nov 22 07:11:33 crc kubenswrapper[4929]: W1122 07:11:33.784807 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 22 07:11:33 crc kubenswrapper[4929]: I1122 07:11:33.784943 4929 trace.go:236] Trace[1222012577]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Nov-2025 07:11:23.783) (total time: 10001ms):
Nov 22 07:11:33 crc kubenswrapper[4929]: Trace[1222012577]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (07:11:33.784)
Nov 22 07:11:33 crc kubenswrapper[4929]: Trace[1222012577]: [10.001420009s] [10.001420009s] END
Nov 22 07:11:33 crc kubenswrapper[4929]: E1122 07:11:33.784973 4929 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 22 07:11:33 crc kubenswrapper[4929]: E1122 07:11:33.932592 4929 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": net/http: TLS handshake timeout" node="crc"
Nov 22 07:11:34 crc kubenswrapper[4929]: E1122 07:11:34.217290 4929 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 22 07:11:35 crc kubenswrapper[4929]: I1122 07:11:35.035945 4929 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 22 07:11:35 crc kubenswrapper[4929]: I1122 07:11:35.036047 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 22 07:11:35 crc kubenswrapper[4929]: W1122 07:11:35.644056 4929 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 22 07:11:35 crc kubenswrapper[4929]: I1122 07:11:35.644181 4929 trace.go:236] Trace[1045487023]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Nov-2025 07:11:25.642) (total time: 10001ms):
Nov 22 07:11:35 crc kubenswrapper[4929]: Trace[1045487023]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (07:11:35.644)
Nov 22 07:11:35 crc kubenswrapper[4929]: Trace[1045487023]: [10.001320168s] [10.001320168s] END
Nov 22 07:11:35 crc kubenswrapper[4929]: E1122 07:11:35.644247 4929 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 22 07:11:36 crc kubenswrapper[4929]: I1122 07:11:36.046350 4929 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 22 07:11:36 crc kubenswrapper[4929]: I1122 07:11:36.046440 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 22 07:11:37 crc kubenswrapper[4929]: I1122 07:11:37.267908 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 22 07:11:37 crc kubenswrapper[4929]: I1122 07:11:37.268061 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:37 crc kubenswrapper[4929]: I1122 07:11:37.269021 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:37 crc kubenswrapper[4929]: I1122 07:11:37.269050 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:37 crc kubenswrapper[4929]: I1122 07:11:37.269059 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:37 crc kubenswrapper[4929]: I1122 07:11:37.271574 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 22 07:11:38 crc kubenswrapper[4929]: I1122 07:11:38.141233 4929 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 22 07:11:38 crc kubenswrapper[4929]: I1122 07:11:38.141281 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:38 crc kubenswrapper[4929]: I1122 07:11:38.142133 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:38 crc kubenswrapper[4929]: I1122 07:11:38.142201 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:38 crc kubenswrapper[4929]: I1122 07:11:38.142244 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:40 crc kubenswrapper[4929]: I1122 07:11:40.336884 4929 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 22 07:11:40 crc kubenswrapper[4929]: I1122 07:11:40.933064 4929 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 07:11:40 crc kubenswrapper[4929]: I1122 07:11:40.934986 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:40 crc kubenswrapper[4929]: I1122 07:11:40.935056 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:40 crc kubenswrapper[4929]: I1122 07:11:40.935069 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:40 crc kubenswrapper[4929]: I1122 07:11:40.935107 4929 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.211339 4929 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.213394 4929 kubelet_node_status.go:115] "Node was previously registered" node="crc"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.213725 4929 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.213785 4929 kubelet_node_status.go:79] "Successfully registered node" node="crc"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.217309 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.217347 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.217362 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.217380 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.217395 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:42Z","lastTransitionTime":"2025-11-22T07:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.219265 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.234504 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.241024 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.246027 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.246065 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.246076 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.246095 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.246106 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:42Z","lastTransitionTime":"2025-11-22T07:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.260866 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.261942 4929 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:59712->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.261970 4929 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:59716->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.262000 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:59712->192.168.126.11:17697: read: connection reset by peer"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.262010 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:59716->192.168.126.11:17697: read: connection reset by peer"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.262322 4929 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.262356 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.264479 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.264507 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.264516 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.264531 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.264541 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:42Z","lastTransitionTime":"2025-11-22T07:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.273968 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.276561 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.276589 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.276600 4929 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.276616 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.276627 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:42Z","lastTransitionTime":"2025-11-22T07:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.289545 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.289573 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.289582 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.289596 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.289608 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:42Z","lastTransitionTime":"2025-11-22T07:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.299640 4929 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.301239 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.301266 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.301276 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.301292 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.301304 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:42Z","lastTransitionTime":"2025-11-22T07:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.302700 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.306318 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.403185 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.403240 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.403254 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.403272 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.403289 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:42Z","lastTransitionTime":"2025-11-22T07:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.508199 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.508258 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.508269 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.508285 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.508312 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:42Z","lastTransitionTime":"2025-11-22T07:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.611304 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.611355 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.611371 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.611392 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.611406 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:42Z","lastTransitionTime":"2025-11-22T07:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.713613 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.713683 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.713705 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.713734 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.713756 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:42Z","lastTransitionTime":"2025-11-22T07:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.810583 4929 apiserver.go:52] "Watching apiserver"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.814837 4929 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.815227 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-etcd/etcd-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h"]
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.815606 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.815714 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.815822 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.815971 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.816432 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.816436 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.816456 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.816485 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.816506 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.816535 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:42Z","lastTransitionTime":"2025-11-22T07:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.816667 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.816751 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.816961 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.817048 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.818559 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.819206 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.819551 4929 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.819600 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.820240 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.820864 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.821058 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.821546 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.821557 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.821832 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.864108 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.885974 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.906054 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.915872 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.915930 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.915964 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.915991 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916022 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916050 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916075 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916101 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916137 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916278 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916308 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916339 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916383 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916408 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916433 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916472 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916495 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916520 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: 
\"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916541 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916564 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916607 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916631 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916663 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916685 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916761 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916784 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916809 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916835 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" 
(UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916863 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916887 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916941 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916962 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916987 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.916982 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.918127 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.918393 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.918563 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.918749 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.918828 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.919376 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.919709 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.919929 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.917010 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920006 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920050 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920082 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920115 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920187 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920243 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920279 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920305 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920331 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod 
\"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920364 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920392 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920420 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920450 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920482 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920509 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920537 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920563 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920589 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920615 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920642 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920667 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920173 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920343 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.920465 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.921341 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.921418 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.921568 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.921588 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.921690 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.921750 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.921806 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.921932 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.921983 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.921999 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.922038 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.922087 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.922138 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.922191 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.922184 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.922288 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.922292 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.922347 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.922403 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.922455 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.922512 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.922525 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.922562 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.922613 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.922663 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.922738 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.922792 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.922847 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.922896 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.923813 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.923936 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.924057 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.924709 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.924743 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.924706 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.924948 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.925843 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.926127 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.926690 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.926957 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.927365 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.928372 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.928853 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.929040 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.929098 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.929873 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.930830 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.930908 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.930961 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.931068 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.931155 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.931253 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.931347 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.932038 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.932004 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.933507 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.933641 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.933687 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.933747 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.933772 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.933791 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:42Z","lastTransitionTime":"2025-11-22T07:11:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934333 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934443 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934495 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934521 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934544 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934569 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934563 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934600 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934623 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934649 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934670 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934690 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934739 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934764 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934788 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934812 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934836 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: 
\"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934863 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934884 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934908 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934932 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934963 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.934989 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935011 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935032 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935055 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935045 4929 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935079 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935102 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935125 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935146 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935190 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935280 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935301 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935363 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935372 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935396 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.935438 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:11:43.435409098 +0000 UTC m=+40.544863111 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935471 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935500 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935525 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935564 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935588 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935612 4929 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935636 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935644 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935657 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935749 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935800 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935879 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.935955 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936000 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936034 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936078 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936091 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936122 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936148 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936207 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936298 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936350 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936427 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936478 4929 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936532 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936608 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936662 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936715 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936792 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936847 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936895 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936943 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936946 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). 
InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.936999 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937085 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937135 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937204 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937287 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937300 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937338 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937385 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937401 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937439 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937532 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937591 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937629 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937669 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937706 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937743 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937800 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937838 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937885 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod 
\"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937939 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.938016 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.938066 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.938116 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.938193 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.938311 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.938395 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.938451 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.938534 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.938637 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: 
\"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.938767 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.938815 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.938866 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.938916 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.938987 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939046 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939101 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939159 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939304 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939371 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939468 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939558 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939615 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939699 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939783 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939847 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939930 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939972 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940011 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 07:11:42 crc 
kubenswrapper[4929]: I1122 07:11:42.940067 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940148 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940242 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940296 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940374 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940424 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940474 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940535 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940589 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940638 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 
07:11:42.940795 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940880 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940934 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940993 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.941012 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.941077 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937444 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937668 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.941583 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937701 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937447 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.937963 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.938059 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.938519 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.938571 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.938989 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939148 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.941727 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939374 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939382 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939368 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939612 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939665 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939678 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.931890 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939797 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.939862 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940236 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940292 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940271 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940435 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940301 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940478 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). 
InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940549 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.940863 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.941279 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.941292 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.941335 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.941438 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.942126 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.942286 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.942321 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.942571 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.942567 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.942848 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.943071 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.943139 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.944281 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.945067 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.945805 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.945845 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.945865 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.941525 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.946060 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.946958 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947013 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947243 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947284 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947319 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947354 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947384 4929 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947502 4929 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947525 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947542 4929 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947558 4929 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947573 4929 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947589 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947606 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947623 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947638 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947653 4929 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947666 4929 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947682 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath 
\"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.954087 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.954819 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.955062 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.955612 4929 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.955959 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.956152 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.956485 4929 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.956664 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.956858 4929 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.956973 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.957075 4929 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.957198 4929 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.957390 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" 
DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.957486 4929 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.957576 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.957657 4929 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.957763 4929 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.957885 4929 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.957982 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.958071 4929 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.958152 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.958303 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.958383 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.958458 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.958556 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.958651 4929 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.958758 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.958836 4929 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.958914 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.958991 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.959068 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.959143 4929 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.959245 4929 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.959347 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.959436 4929 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.959509 4929 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.959598 4929 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.959699 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.959780 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" 
(UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.959871 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.959988 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.960086 4929 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.960175 4929 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.960294 4929 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.952352 4929 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.953719 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.960383 4929 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961142 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961169 4929 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961187 4929 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961202 4929 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961227 
4929 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961243 4929 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961257 4929 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961273 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961289 4929 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961303 4929 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961319 4929 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961337 4929 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961350 4929 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961361 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961371 4929 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961382 4929 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961393 4929 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961405 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" 
(UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961418 4929 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961430 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961442 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961455 4929 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961470 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961489 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961505 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961520 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961534 4929 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961547 4929 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961561 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961573 4929 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961586 4929 reconciler_common.go:293] "Volume detached for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961598 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961611 4929 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961623 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961635 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961646 4929 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961658 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961669 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961681 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961690 4929 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961702 4929 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961713 4929 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961724 4929 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961734 4929 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.961747 4929 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.945941 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.946018 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.946033 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.946074 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.946345 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.946557 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.946769 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). 
InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947237 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947304 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947554 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.947662 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.948362 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.948461 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.948775 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.949070 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.949070 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.949199 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.950171 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.950943 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.951034 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.951118 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.951665 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). 
InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.951728 4929 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.962117 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 07:11:43.462093171 +0000 UTC m=+40.571547184 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.951933 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.951951 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.953029 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.953119 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.953276 4929 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.962413 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 07:11:43.462402999 +0000 UTC m=+40.571857012 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.953383 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.953430 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.953588 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.953627 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.953797 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.953918 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.953993 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.954017 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.954015 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.953753 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.954451 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.954769 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.954542 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.954838 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.954544 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.955074 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.955038 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.955522 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.955867 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.957269 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.967534 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.983957 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.984011 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.984035 4929 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.984125 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 07:11:43.484094285 +0000 UTC m=+40.593548338 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.984026 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.984359 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.984378 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.984393 4929 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.984384 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 07:11:42 crc kubenswrapper[4929]: E1122 07:11:42.984430 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 07:11:43.484419523 +0000 UTC m=+40.593873546 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.985033 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.988053 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.990770 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 07:11:42 crc kubenswrapper[4929]: I1122 07:11:42.997877 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.001544 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.008155 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.017703 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.036775 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.036813 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.036826 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.036844 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.036857 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:43Z","lastTransitionTime":"2025-11-22T07:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.062548 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.062697 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.062849 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.062997 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063005 4929 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063150 4929 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063339 4929 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063408 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063427 4929 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063441 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063456 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063480 4929 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063492 4929 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063505 4929 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063516 4929 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063526 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063540 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063553 4929 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063571 4929 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063582 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063593 4929 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063604 4929 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063615 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063625 4929 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063636 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063645 4929 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063655 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063665 4929 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063678 4929 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063696 4929 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063709 4929 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063722 4929 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063732 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063741 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063750 4929 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063760 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063769 4929 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063780 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063788 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063801 4929 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063811 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063821 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063830 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063840 4929 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063849 4929 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063859 4929 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063868 4929 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063878 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063887 4929 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063897 4929 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.063908 4929 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.133952 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.139973 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.140029 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.140044 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.140065 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.140079 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:43Z","lastTransitionTime":"2025-11-22T07:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:43 crc kubenswrapper[4929]: W1122 07:11:43.153651 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-9109e5cd5a918b57d864ea46c9cd4b4e79f5c1d2404a8d67b351935513a75448 WatchSource:0}: Error finding container 9109e5cd5a918b57d864ea46c9cd4b4e79f5c1d2404a8d67b351935513a75448: Status 404 returned error can't find the container with id 9109e5cd5a918b57d864ea46c9cd4b4e79f5c1d2404a8d67b351935513a75448
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.157893 4929 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Nov 22 07:11:43 crc kubenswrapper[4929]: 	container &Container{Name:network-operator,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,Command:[/bin/bash -c #!/bin/bash
Nov 22 07:11:43 crc kubenswrapper[4929]: 	set -o allexport
Nov 22 07:11:43 crc kubenswrapper[4929]: 	if [[ -f /etc/kubernetes/apiserver-url.env ]]; then
Nov 22 07:11:43 crc kubenswrapper[4929]: 	source /etc/kubernetes/apiserver-url.env
Nov 22 07:11:43 crc kubenswrapper[4929]: 	else
Nov 22 07:11:43 crc kubenswrapper[4929]: 	echo "Error: /etc/kubernetes/apiserver-url.env is missing"
Nov 22 07:11:43 crc kubenswrapper[4929]: 	exit 1
Nov 22 07:11:43 crc kubenswrapper[4929]: 	fi
Nov 22 07:11:43 crc kubenswrapper[4929]: 	exec /usr/bin/cluster-network-operator start --listen=0.0.0.0:9104
Nov 22 07:11:43 crc kubenswrapper[4929]: 	],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:cno,HostPort:9104,ContainerPort:9104,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:RELEASE_VERSION,Value:4.18.1,ValueFrom:nil,},EnvVar{Name:KUBE_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b97554198294bf544fbc116c94a0a1fb2ec8a4de0e926bf9d9e320135f0bee6f,ValueFrom:nil,},EnvVar{Name:KUBE_RBAC_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09,ValueFrom:nil,},EnvVar{Name:MULTUS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26,ValueFrom:nil,},EnvVar{Name:MULTUS_ADMISSION_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317,ValueFrom:nil,},EnvVar{Name:CNI_PLUGINS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc,ValueFrom:nil,},EnvVar{Name:BOND_CNI_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78,ValueFrom:nil,},EnvVar{Name:WHEREABOUTS_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4,ValueFrom:nil,},EnvVar{Name:ROUTE_OVERRRIDE_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa,ValueFrom:nil,},EnvVar{Name:MULTUS_NETWORKPOLICY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:23f833d3738d68706eb2f2868bd76bd71cee016cffa6faf5f045a60cc8c6eddd,ValueFrom:nil,},EnvVar{Name:OVN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,ValueFrom:nil,},EnvVar{Name:OVN_NB_RAFT_ELECTION_TIMER,Value:10,ValueFrom:nil,},EnvVar{Name:OVN_SB_RAFT_ELECTION_TIMER,Value:16,ValueFrom:nil,},EnvVar{Name:OVN_NORTHD_PROBE_INTERVAL,Value:10000,ValueFrom:nil,},EnvVar{Name:OVN_CONTROLLER_INACTIVITY_PROBE,Value:180000,ValueFrom:nil,},EnvVar{Name:OVN_NB_INACTIVITY_PROBE,Value:60000,ValueFrom:nil,},EnvVar{Name:EGRESS_ROUTER_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c,ValueFrom:nil,},EnvVar{Name:NETWORK_METRICS_DAEMON_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_SOURCE_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_TARGET_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_OPERATOR_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:CLOUD_NETWORK_CONFIG_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8048f1cb0be521f09749c0a489503cd56d85b68c6ca93380e082cfd693cd97a8,ValueFrom:nil,},EnvVar{Name:CLI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,ValueFrom:nil,},EnvVar{Name:FRR_K8S_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5dbf844e49bb46b78586930149e5e5f5dc121014c8afd10fe36f3651967cc256,ValueFrom:nil,},EnvVar{Name:NETWORKING_CONSOLE_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd,ValueFrom:nil,},EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:host-etc-kube,ReadOnly:true,MountPath:/etc/kubernetes,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-tls,ReadOnly:false,MountPath:/var/run/secrets/serving-cert,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rdwmf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-operator-58b4c7f79c-55gtf_openshift-network-operator(37a5e44f-9a88-4405-be8a-b645485e7312): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars
Nov 22 07:11:43 crc kubenswrapper[4929]: 	> logger="UnhandledError"
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.159403 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"network-operator\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" podUID="37a5e44f-9a88-4405-be8a-b645485e7312"
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.163456 4929 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.165136 4929 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"etcd-crc\" already exists" pod="openshift-etcd/etcd-crc"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.232954 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.233373 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.233840 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.233945 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.234024 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.234081 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.234260 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.238982 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.239293 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.239471 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). 
InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.241629 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.241796 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.242189 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.242302 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.242625 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.242713 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.242817 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.242902 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.244015 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.245326 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.245788 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.245817 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.246094 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.246182 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.246647 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.246698 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.246730 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:43Z","lastTransitionTime":"2025-11-22T07:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.251303 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.251540 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.251752 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.251746 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.251809 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.252080 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.252248 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.254606 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.254598 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.254696 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.257057 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.257198 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.257301 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.257649 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.257796 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.258014 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.260234 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.266125 4929 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.266266 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.266325 4929 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.266377 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.266438 4929 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.266496 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.266576 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.266635 4929 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.266687 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.266738 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.266795 4929 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.266847 4929 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.266898 4929 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.266946 4929 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.266995 4929 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.267048 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.267105 4929 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.267155 4929 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.267237 4929 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.267306 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.267370 4929 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.267434 4929 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.267484 4929 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.267537 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.267594 4929 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.267647 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.267700 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.267756 4929 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.267807 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.267860 4929 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.267915 4929 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.267964 4929 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.268022 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.268081 4929 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.268135 4929 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.268187 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.349796 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.349830 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.349840 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.349857 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.349869 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:43Z","lastTransitionTime":"2025-11-22T07:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.391405 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.391832 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.391973 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.392934 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.394041 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.412608 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.418838 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.425046 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.430396 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.448493 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.452344 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.452376 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.452385 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.452400 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.452410 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:43Z","lastTransitionTime":"2025-11-22T07:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.460479 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 22 07:11:43 crc kubenswrapper[4929]: W1122 07:11:43.460911 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-0165ff517326cb16089cd32e433024bb745462ee9312e3b3ca4bd5678c7b02e7 WatchSource:0}: Error finding container 0165ff517326cb16089cd32e433024bb745462ee9312e3b3ca4bd5678c7b02e7: Status 404 returned error can't find the container with id 0165ff517326cb16089cd32e433024bb745462ee9312e3b3ca4bd5678c7b02e7
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.465553 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:iptables-alerter,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,Command:[/iptables-alerter/iptables-alerter.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONTAINER_RUNTIME_ENDPOINT,Value:unix:///run/crio/crio.sock,ValueFrom:nil,},EnvVar{Name:ALERTER_POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{68157440 0} {} 65Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:iptables-alerter-script,ReadOnly:false,MountPath:/iptables-alerter,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-slash,ReadOnly:true,MountPath:/host,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rczfb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod iptables-alerter-4ln5h_openshift-network-operator(d75a4c96-2883-4a0b-bab2-0fab2b6c0b49): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError"
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.466757 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"iptables-alerter\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/iptables-alerter-4ln5h" podUID="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.469836 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.469947 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:11:44.46992223 +0000 UTC m=+41.579376253 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.470007 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.470062 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.470103 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.470121 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.470134 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.470144 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.470144 4929 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.470155 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.470171 4929 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.470184 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.470234 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 07:11:44.470182517 +0000 UTC m=+41.579636530 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.470266 4929 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.470278 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\""
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.470344 4929 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.470440 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 07:11:44.470407373 +0000 UTC m=+41.579861386 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 22 07:11:43 crc kubenswrapper[4929]: W1122 07:11:43.472244 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-3e6758f2607fbd3ab7a6209b759ef80f7f84abd4f1fcd99cc43c37ae64b2d32f WatchSource:0}: Error finding container 3e6758f2607fbd3ab7a6209b759ef80f7f84abd4f1fcd99cc43c37ae64b2d32f: Status 404 returned error can't find the container with id 3e6758f2607fbd3ab7a6209b759ef80f7f84abd4f1fcd99cc43c37ae64b2d32f
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.474874 4929 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Nov 22 07:11:43 crc kubenswrapper[4929]: 	container &Container{Name:webhook,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe
Nov 22 07:11:43 crc kubenswrapper[4929]: 	if [[ -f "/env/_master" ]]; then
Nov 22 07:11:43 crc kubenswrapper[4929]: 	  set -o allexport
Nov 22 07:11:43 crc kubenswrapper[4929]: 	  source "/env/_master"
Nov 22 07:11:43 crc kubenswrapper[4929]: 	  set +o allexport
Nov 22 07:11:43 crc kubenswrapper[4929]: 	fi
Nov 22 07:11:43 crc kubenswrapper[4929]: 	# OVN-K will try to remove hybrid overlay node annotations even when the hybrid overlay is not enabled.
Nov 22 07:11:43 crc kubenswrapper[4929]: 	# https://github.com/ovn-org/ovn-kubernetes/blob/ac6820df0b338a246f10f412cd5ec903bd234694/go-controller/pkg/ovn/master.go#L791
Nov 22 07:11:43 crc kubenswrapper[4929]: 	ho_enable="--enable-hybrid-overlay"
Nov 22 07:11:43 crc kubenswrapper[4929]: 	echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start webhook"
Nov 22 07:11:43 crc kubenswrapper[4929]: 	# extra-allowed-user: service account `ovn-kubernetes-control-plane`
Nov 22 07:11:43 crc kubenswrapper[4929]: 	# sets pod annotations in multi-homing layer3 network controller (cluster-manager)
Nov 22 07:11:43 crc kubenswrapper[4929]: 	exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \
Nov 22 07:11:43 crc kubenswrapper[4929]: 	  --webhook-cert-dir="/etc/webhook-cert" \
Nov 22 07:11:43 crc kubenswrapper[4929]: 	  --webhook-host=127.0.0.1 \
Nov 22 07:11:43 crc kubenswrapper[4929]: 	  --webhook-port=9743 \
Nov 22 07:11:43 crc kubenswrapper[4929]: 	  ${ho_enable} \
Nov 22 07:11:43 crc kubenswrapper[4929]: 	  --enable-interconnect \
Nov 22 07:11:43 crc kubenswrapper[4929]: 	  --disable-approver \
Nov 22 07:11:43 crc kubenswrapper[4929]: 	  --extra-allowed-user="system:serviceaccount:openshift-ovn-kubernetes:ovn-kubernetes-control-plane" \
Nov 22 07:11:43 crc kubenswrapper[4929]: 	  --wait-for-kubernetes-api=200s \
Nov 22 07:11:43 crc kubenswrapper[4929]: 	  --pod-admission-conditions="/var/run/ovnkube-identity-config/additional-pod-admission-cond.json" \
Nov 22 07:11:43 crc kubenswrapper[4929]: 	  --loglevel="${LOGLEVEL}"
Nov 22 07:11:43 crc kubenswrapper[4929]: 	],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:2,ValueFrom:nil,},EnvVar{Name:KUBERNETES_NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/etc/webhook-cert/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars
Nov 22 07:11:43 crc kubenswrapper[4929]:  > logger="UnhandledError"
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.477634 4929 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Nov 22 07:11:43 crc kubenswrapper[4929]: 	container &Container{Name:approver,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe
Nov 22 07:11:43 crc kubenswrapper[4929]: 	if [[ -f "/env/_master" ]]; then
Nov 22 07:11:43 crc kubenswrapper[4929]: 	  set -o allexport
Nov 22 07:11:43 crc kubenswrapper[4929]: 	  source "/env/_master"
Nov 22 07:11:43 crc kubenswrapper[4929]: 	  set +o allexport
Nov 22 07:11:43 crc kubenswrapper[4929]: 	fi
Nov 22 07:11:43 crc kubenswrapper[4929]: 
Nov 22 07:11:43 crc kubenswrapper[4929]: 	echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start approver"
Nov 22 07:11:43 crc kubenswrapper[4929]: 	exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \
Nov 22 07:11:43 crc kubenswrapper[4929]: 	  --disable-webhook \
Nov 22 07:11:43 crc kubenswrapper[4929]: 	  --csr-acceptance-conditions="/var/run/ovnkube-identity-config/additional-cert-acceptance-cond.json" \
Nov 22 07:11:43 crc kubenswrapper[4929]: 	  --loglevel="${LOGLEVEL}"
Nov 22 07:11:43 crc kubenswrapper[4929]: 	],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:4,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars
Nov 22 07:11:43 crc kubenswrapper[4929]:  > logger="UnhandledError"
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.479353 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"webhook\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\", failed to \"StartContainer\" for \"approver\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"]" pod="openshift-network-node-identity/network-node-identity-vrzqb" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.556455 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.556499 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.556511 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.556528 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.556542 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:43Z","lastTransitionTime":"2025-11-22T07:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.571751 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.571859 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.571974 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.572015 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.572027 4929 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.572078 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.572118 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.572145 4929 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.572086 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 07:11:44.572067427 +0000 UTC m=+41.681521440 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 07:11:43 crc kubenswrapper[4929]: E1122 07:11:43.573544 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 07:11:44.573476063 +0000 UTC m=+41.682930146 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.659837 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.659921 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.659949 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.659981 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.660006 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:43Z","lastTransitionTime":"2025-11-22T07:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.762259 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.762484 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.762543 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.762600 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.762653 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:43Z","lastTransitionTime":"2025-11-22T07:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.866490 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.866604 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.866615 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.866637 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.866651 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:43Z","lastTransitionTime":"2025-11-22T07:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.870120 4929 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.870230 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.950259 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.951512 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.953579 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.954661 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.955459 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.956170 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.957079 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.957854 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.958738 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.959476 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.960135 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.961081 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.961805 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.962581 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.962914 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.963312 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.964011 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.964828 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.966544 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.969740 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.969879 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.969946 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.969964 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.969989 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.970007 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:43Z","lastTransitionTime":"2025-11-22T07:11:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.971497 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.972827 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.975495 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.976678 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.978936 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.979851 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.981161 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.982550 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.983672 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.985180 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.986417 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.988953 4929 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.989135 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.991297 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.992078 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.993017 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.994662 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.995334 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes"
Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.995912 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"term
inated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.996597 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 22 07:11:43 crc kubenswrapper[4929]: I1122 07:11:43.998440 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.000167 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.000812 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes 
dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.001567 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.002304 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.002926 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.003433 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.004036 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.004704 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.005649 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.006251 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.006877 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.007485 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.008099 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.008690 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.009167 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.010623 4929 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.025134 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.041177 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.057874 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.072459 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.074741 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.074807 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.074831 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.074864 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.074888 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:44Z","lastTransitionTime":"2025-11-22T07:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.087665 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.159292 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.159890 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.162854 4929 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8" exitCode=255 Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.162906 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8"} Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.162969 4929 scope.go:117] "RemoveContainer" containerID="325a661250c6fb0019e2e8209bdeee0ff6a3e62416c31845f4e02887c7f0effa" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.165129 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"3e6758f2607fbd3ab7a6209b759ef80f7f84abd4f1fcd99cc43c37ae64b2d32f"} Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.166783 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"0165ff517326cb16089cd32e433024bb745462ee9312e3b3ca4bd5678c7b02e7"} Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.167497 4929 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 22 07:11:44 crc kubenswrapper[4929]: container &Container{Name:webhook,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe Nov 22 07:11:44 crc kubenswrapper[4929]: if [[ -f "/env/_master" ]]; then Nov 22 07:11:44 crc kubenswrapper[4929]: set -o allexport Nov 22 07:11:44 crc kubenswrapper[4929]: source "/env/_master" Nov 22 07:11:44 crc kubenswrapper[4929]: set +o allexport Nov 22 07:11:44 crc kubenswrapper[4929]: fi Nov 22 07:11:44 crc kubenswrapper[4929]: # 
OVN-K will try to remove hybrid overlay node annotations even when the hybrid overlay is not enabled. Nov 22 07:11:44 crc kubenswrapper[4929]: # https://github.com/ovn-org/ovn-kubernetes/blob/ac6820df0b338a246f10f412cd5ec903bd234694/go-controller/pkg/ovn/master.go#L791 Nov 22 07:11:44 crc kubenswrapper[4929]: ho_enable="--enable-hybrid-overlay" Nov 22 07:11:44 crc kubenswrapper[4929]: echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start webhook" Nov 22 07:11:44 crc kubenswrapper[4929]: # extra-allowed-user: service account `ovn-kubernetes-control-plane` Nov 22 07:11:44 crc kubenswrapper[4929]: # sets pod annotations in multi-homing layer3 network controller (cluster-manager) Nov 22 07:11:44 crc kubenswrapper[4929]: exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \ Nov 22 07:11:44 crc kubenswrapper[4929]: --webhook-cert-dir="/etc/webhook-cert" \ Nov 22 07:11:44 crc kubenswrapper[4929]: --webhook-host=127.0.0.1 \ Nov 22 07:11:44 crc kubenswrapper[4929]: --webhook-port=9743 \ Nov 22 07:11:44 crc kubenswrapper[4929]: ${ho_enable} \ Nov 22 07:11:44 crc kubenswrapper[4929]: --enable-interconnect \ Nov 22 07:11:44 crc kubenswrapper[4929]: --disable-approver \ Nov 22 07:11:44 crc kubenswrapper[4929]: --extra-allowed-user="system:serviceaccount:openshift-ovn-kubernetes:ovn-kubernetes-control-plane" \ Nov 22 07:11:44 crc kubenswrapper[4929]: --wait-for-kubernetes-api=200s \ Nov 22 07:11:44 crc kubenswrapper[4929]: --pod-admission-conditions="/var/run/ovnkube-identity-config/additional-pod-admission-cond.json" \ Nov 22 07:11:44 crc kubenswrapper[4929]: --loglevel="${LOGLEVEL}" Nov 22 07:11:44 crc kubenswrapper[4929]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:2,ValueFrom:nil,},EnvVar{Name:KUBERNETES_NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/etc/webhook-cert/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Nov 22 07:11:44 crc kubenswrapper[4929]: > logger="UnhandledError" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.168660 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"9109e5cd5a918b57d864ea46c9cd4b4e79f5c1d2404a8d67b351935513a75448"} Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.169650 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:iptables-alerter,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,Command:[/iptables-alerter/iptables-alerter.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONTAINER_RUNTIME_ENDPOINT,Value:unix:///run/crio/crio.sock,ValueFrom:nil,},EnvVar{Name:ALERTER_POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{68157440 0} {} 65Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:iptables-alerter-script,ReadOnly:false,MountPath:/iptables-alerter,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-slash,ReadOnly:true,MountPath:/host,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rczfb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod iptables-alerter-4ln5h_openshift-network-operator(d75a4c96-2883-4a0b-bab2-0fab2b6c0b49): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.170075 4929 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 22 07:11:44 crc kubenswrapper[4929]: container &Container{Name:approver,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe Nov 22 07:11:44 crc kubenswrapper[4929]: if [[ -f "/env/_master" ]]; then Nov 22 07:11:44 crc kubenswrapper[4929]: set -o allexport Nov 22 07:11:44 crc kubenswrapper[4929]: source "/env/_master" Nov 22 07:11:44 crc kubenswrapper[4929]: set +o allexport Nov 22 07:11:44 crc kubenswrapper[4929]: fi Nov 22 07:11:44 crc kubenswrapper[4929]: Nov 22 07:11:44 crc kubenswrapper[4929]: echo 
"I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start approver" Nov 22 07:11:44 crc kubenswrapper[4929]: exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \ Nov 22 07:11:44 crc kubenswrapper[4929]: --disable-webhook \ Nov 22 07:11:44 crc kubenswrapper[4929]: --csr-acceptance-conditions="/var/run/ovnkube-identity-config/additional-cert-acceptance-cond.json" \ Nov 22 07:11:44 crc kubenswrapper[4929]: --loglevel="${LOGLEVEL}" Nov 22 07:11:44 crc kubenswrapper[4929]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:4,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Nov 22 07:11:44 crc kubenswrapper[4929]: > logger="UnhandledError" Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.170851 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"iptables-alerter\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/iptables-alerter-4ln5h" podUID="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.171300 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"webhook\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\", failed to \"StartContainer\" for \"approver\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"]" pod="openshift-network-node-identity/network-node-identity-vrzqb" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.171413 4929 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 22 07:11:44 crc kubenswrapper[4929]: container &Container{Name:network-operator,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,Command:[/bin/bash -c #!/bin/bash Nov 22 07:11:44 crc 
kubenswrapper[4929]: set -o allexport Nov 22 07:11:44 crc kubenswrapper[4929]: if [[ -f /etc/kubernetes/apiserver-url.env ]]; then Nov 22 07:11:44 crc kubenswrapper[4929]: source /etc/kubernetes/apiserver-url.env Nov 22 07:11:44 crc kubenswrapper[4929]: else Nov 22 07:11:44 crc kubenswrapper[4929]: echo "Error: /etc/kubernetes/apiserver-url.env is missing" Nov 22 07:11:44 crc kubenswrapper[4929]: exit 1 Nov 22 07:11:44 crc kubenswrapper[4929]: fi Nov 22 07:11:44 crc kubenswrapper[4929]: exec /usr/bin/cluster-network-operator start --listen=0.0.0.0:9104 Nov 22 07:11:44 crc kubenswrapper[4929]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:cno,HostPort:9104,ContainerPort:9104,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:RELEASE_VERSION,Value:4.18.1,ValueFrom:nil,},EnvVar{Name:KUBE_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b97554198294bf544fbc116c94a0a1fb2ec8a4de0e926bf9d9e320135f0bee6f,ValueFrom:nil,},EnvVar{Name:KUBE_RBAC_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09,ValueFrom:nil,},EnvVar{Name:MULTUS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26,ValueFrom:nil,},EnvVar{Name:MULTUS_ADMISSION_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317,ValueFrom:nil,},EnvVar{Name:CNI_PLUGINS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc,ValueFrom:nil,},EnvVar{Name:BOND_CNI_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78,ValueFrom:nil,},EnvVar{Name:WHEREABOUTS_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4,ValueFrom:nil,},EnvVar{Name:ROUTE_OVERRRIDE_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa,ValueFrom:nil,},EnvVar{Name:MULTUS_NETWORKPOLICY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:23f833d3738d68706eb2f2868bd76bd71cee016cffa6faf5f045a60cc8c6eddd,ValueFrom:nil,},EnvVar{Name:OVN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,ValueFrom:nil,},EnvVar{Name:OVN_NB_RAFT_ELECTION_TIMER,Value:10,ValueFrom:nil,},EnvVar{Name:OVN_SB_RAFT_ELECTION_TIMER,Value:16,ValueFrom:nil,},EnvVar{Name:OVN_NORTHD_PROBE_INTERVAL,Value:10000,ValueFrom:nil,},EnvVar{Name:OVN_CONTROLLER_INACTIVITY_PROBE,Value:180000,ValueFrom:nil,},EnvVar{Name:OVN_NB_INACTIVITY_PROBE,Value:60000,ValueFrom:nil,},EnvVar{Name:EGRESS_ROUTER_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c,ValueFrom:nil,},EnvVar{Name:NETWORK_METRICS_DAEMON_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_SOURCE_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_TARGET_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf8
6d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_OPERATOR_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:CLOUD_NETWORK_CONFIG_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8048f1cb0be521f09749c0a489503cd56d85b68c6ca93380e082cfd693cd97a8,ValueFrom:nil,},EnvVar{Name:CLI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,ValueFrom:nil,},EnvVar{Name:FRR_K8S_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5dbf844e49bb46b78586930149e5e5f5dc121014c8afd10fe36f3651967cc256,ValueFrom:nil,},EnvVar{Name:NETWORKING_CONSOLE_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd,ValueFrom:nil,},EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:host-etc-kube,ReadOnly:true,MountPath:/etc/kubernetes,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-tls,ReadOnly:false,MountPath:/var/run/secrets/serving-cert,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rdwmf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-operator-58b4c7f79c-55gtf_openshift-network-operator(37a5e44f-9a88-4405-be8a-b645485e7312): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Nov 22 07:11:44 crc kubenswrapper[4929]: > logger="UnhandledError" Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.172830 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"network-operator\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" podUID="37a5e44f-9a88-4405-be8a-b645485e7312" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.177525 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.177649 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.177732 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.177815 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 
07:11:44.177914 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:44Z","lastTransitionTime":"2025-11-22T07:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.179644 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.180029 4929 scope.go:117] "RemoveContainer" containerID="d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8" Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.180290 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.193469 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"q
uay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c68774
41ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.206961 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.223246 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.241858 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.254647 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.267706 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cer
t-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.279606 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.279632 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.279640 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.279653 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.279664 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:44Z","lastTransitionTime":"2025-11-22T07:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.281361 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.289578 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.300680 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://325a661250c6fb0019e2e8209bdeee0ff6a3e62416c31845f4e02887c7f0effa\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:20Z\\\",\\\"message\\\":\\\"W1122 07:11:19.021509 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1122 
07:11:19.021897 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763795479 cert, and key in /tmp/serving-cert-2615518703/serving-signer.crt, /tmp/serving-cert-2615518703/serving-signer.key\\\\nI1122 07:11:19.936407 1 observer_polling.go:159] Starting file observer\\\\nW1122 07:11:19.942797 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1122 07:11:19.943074 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:19.944384 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2615518703/tls.crt::/tmp/serving-cert-2615518703/tls.key\\\\\\\"\\\\nF1122 07:11:20.208349 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:18Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.311863 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.321834 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.331107 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.348297 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa
2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.360931 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.371506 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.381864 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.381938 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.381962 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.381912 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.381999 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.382060 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:44Z","lastTransitionTime":"2025-11-22T07:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.396436 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.478474 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.478653 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:11:46.478625381 +0000 UTC m=+43.588079404 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.478754 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.478795 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.478891 4929 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.478925 4929 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.478972 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 07:11:46.478953099 +0000 UTC m=+43.588407122 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.479028 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 07:11:46.478996591 +0000 UTC m=+43.588450644 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.484465 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.484501 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.484524 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.484544 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.484559 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:44Z","lastTransitionTime":"2025-11-22T07:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.579488 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.579535 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.579683 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.579705 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.579721 4929 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.579775 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. 
No retries permitted until 2025-11-22 07:11:46.579761582 +0000 UTC m=+43.689215595 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.579775 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.579830 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.579855 4929 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.579969 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 07:11:46.579935207 +0000 UTC m=+43.689389280 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.587360 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.587442 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.587466 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.587493 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.587511 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:44Z","lastTransitionTime":"2025-11-22T07:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.689946 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.690004 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.690019 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.690040 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.690093 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:44Z","lastTransitionTime":"2025-11-22T07:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.793611 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.793700 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.793738 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.793769 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.793792 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:44Z","lastTransitionTime":"2025-11-22T07:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.896677 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.896727 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.896740 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.896759 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.896775 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:44Z","lastTransitionTime":"2025-11-22T07:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.946492 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.946604 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.946492 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.946701 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.946763 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:11:44 crc kubenswrapper[4929]: E1122 07:11:44.946848 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.999889 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.999943 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.999959 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:44 crc kubenswrapper[4929]: I1122 07:11:44.999980 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:44.999998 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:44Z","lastTransitionTime":"2025-11-22T07:11:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.102146 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.102199 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.102257 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.102300 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.102316 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:45Z","lastTransitionTime":"2025-11-22T07:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.172281 4929 scope.go:117] "RemoveContainer" containerID="d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8" Nov 22 07:11:45 crc kubenswrapper[4929]: E1122 07:11:45.172524 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.190506 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.205511 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.205566 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.205583 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.205606 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.205654 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:45Z","lastTransitionTime":"2025-11-22T07:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.208703 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":
\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.223405 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.238372 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.260430 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa
2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.277831 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.292760 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.306554 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.308185 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.308248 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.308261 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.308277 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.308289 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:45Z","lastTransitionTime":"2025-11-22T07:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.317725 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.410484 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.410524 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.410538 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.410557 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.410571 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:45Z","lastTransitionTime":"2025-11-22T07:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.513388 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.513472 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.513540 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.513572 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.513600 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:45Z","lastTransitionTime":"2025-11-22T07:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.616464 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.616518 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.616535 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.616557 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.616573 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:45Z","lastTransitionTime":"2025-11-22T07:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.718689 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.718724 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.718733 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.718746 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.718756 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:45Z","lastTransitionTime":"2025-11-22T07:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.821281 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.821372 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.821391 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.821416 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.821433 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:45Z","lastTransitionTime":"2025-11-22T07:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.924011 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.924075 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.924095 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.924119 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:45 crc kubenswrapper[4929]: I1122 07:11:45.924139 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:45Z","lastTransitionTime":"2025-11-22T07:11:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.027344 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.027398 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.027410 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.027430 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.027442 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:46Z","lastTransitionTime":"2025-11-22T07:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.130694 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.130759 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.130780 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.130806 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.130823 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:46Z","lastTransitionTime":"2025-11-22T07:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.178389 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.233376 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.233439 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.233456 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.233482 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.233499 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:46Z","lastTransitionTime":"2025-11-22T07:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.336708 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.336785 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.336815 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.336845 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.336865 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:46Z","lastTransitionTime":"2025-11-22T07:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.440095 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.440170 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.440193 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.440223 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.440247 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:46Z","lastTransitionTime":"2025-11-22T07:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.496884 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.497049 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:11:46 crc kubenswrapper[4929]: E1122 07:11:46.497109 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:11:50.497077898 +0000 UTC m=+47.606531951 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:11:46 crc kubenswrapper[4929]: E1122 07:11:46.497163 4929 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.497196 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:11:46 crc kubenswrapper[4929]: E1122 07:11:46.497265 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 07:11:50.497246123 +0000 UTC m=+47.606700136 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 22 07:11:46 crc kubenswrapper[4929]: E1122 07:11:46.497401 4929 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 22 07:11:46 crc kubenswrapper[4929]: E1122 07:11:46.497523 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 07:11:50.497494799 +0000 UTC m=+47.606948852 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.542687 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.542737 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.542754 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.542775 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.542791 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:46Z","lastTransitionTime":"2025-11-22T07:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.598160 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.598257 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:11:46 crc kubenswrapper[4929]: E1122 07:11:46.598433 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 22 07:11:46 crc kubenswrapper[4929]: E1122 07:11:46.598476 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 22 07:11:46 crc kubenswrapper[4929]: E1122 07:11:46.598496 4929 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 07:11:46 crc kubenswrapper[4929]: E1122 07:11:46.598511 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 22 07:11:46 crc kubenswrapper[4929]: E1122 07:11:46.598580 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 22 07:11:46 crc kubenswrapper[4929]: E1122 07:11:46.598597 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 07:11:50.598555368 +0000 UTC m=+47.708009411 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 07:11:46 crc kubenswrapper[4929]: E1122 07:11:46.598608 4929 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 07:11:46 crc kubenswrapper[4929]: E1122 07:11:46.598706 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 07:11:50.598672051 +0000 UTC m=+47.708126124 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.646109 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.646182 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.646203 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.646265 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.646288 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:46Z","lastTransitionTime":"2025-11-22T07:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.749020 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.749096 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.749113 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.749137 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.749157 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:46Z","lastTransitionTime":"2025-11-22T07:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.851531 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.851755 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.851787 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.851821 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.851853 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:46Z","lastTransitionTime":"2025-11-22T07:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.946493 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:11:46 crc kubenswrapper[4929]: E1122 07:11:46.946630 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.946511 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:11:46 crc kubenswrapper[4929]: E1122 07:11:46.946709 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.946494 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:11:46 crc kubenswrapper[4929]: E1122 07:11:46.946777 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.959360 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.959401 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.959413 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.959428 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:46 crc kubenswrapper[4929]: I1122 07:11:46.959441 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:46Z","lastTransitionTime":"2025-11-22T07:11:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.062172 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.062198 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.062218 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.062231 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.062240 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:47Z","lastTransitionTime":"2025-11-22T07:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.164087 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.164112 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.164120 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.164136 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.164144 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:47Z","lastTransitionTime":"2025-11-22T07:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.265909 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.265945 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.265955 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.265970 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.265981 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:47Z","lastTransitionTime":"2025-11-22T07:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.368714 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.368764 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.368774 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.368791 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.368800 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:47Z","lastTransitionTime":"2025-11-22T07:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.471628 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.471673 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.471685 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.471703 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.471714 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:47Z","lastTransitionTime":"2025-11-22T07:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.573825 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.573863 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.573873 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.573887 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.573896 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:47Z","lastTransitionTime":"2025-11-22T07:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.675861 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.675902 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.675911 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.675926 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.675937 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:47Z","lastTransitionTime":"2025-11-22T07:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.778415 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.778459 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.778471 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.778488 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.778501 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:47Z","lastTransitionTime":"2025-11-22T07:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.879994 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.880035 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.880048 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.880066 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.880076 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:47Z","lastTransitionTime":"2025-11-22T07:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.943230 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-vzgwx"] Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.943506 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-bb6rk"] Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.943686 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-bb6rk" Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.943693 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-vzgwx" Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.947490 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.947517 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.947568 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.947504 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.947606 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.947650 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.947682 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.947999 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.956772 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.963309 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.978637 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.982351 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.982383 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.982392 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.982407 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.982416 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:47Z","lastTransitionTime":"2025-11-22T07:11:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.989085 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:47 crc kubenswrapper[4929]: I1122 07:11:47.997247 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.007179 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.009427 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-host-run-netns\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.009468 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-host-var-lib-kubelet\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.009488 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/763619b4-b584-4089-bd56-96823e22e25e-multus-daemon-config\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.009506 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-host-var-lib-cni-multus\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.009527 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-host-run-k8s-cni-cncf-io\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.009577 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-host-run-multus-certs\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.009662 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-multus-cni-dir\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.009687 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-hostroot\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.009722 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/73f3e50e-3229-46e8-969b-e023922fdbce-hosts-file\") pod \"node-resolver-vzgwx\" (UID: \"73f3e50e-3229-46e8-969b-e023922fdbce\") " pod="openshift-dns/node-resolver-vzgwx" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.009738 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-etc-kubernetes\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.009753 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwc8g\" (UniqueName: \"kubernetes.io/projected/763619b4-b584-4089-bd56-96823e22e25e-kube-api-access-zwc8g\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.009769 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-host-var-lib-cni-bin\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.009785 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" 
(UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-multus-conf-dir\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.009802 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-system-cni-dir\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.009816 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-cnibin\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.009835 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-multus-socket-dir-parent\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.009924 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/763619b4-b584-4089-bd56-96823e22e25e-cni-binary-copy\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.009967 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p26mb\" (UniqueName: \"kubernetes.io/projected/73f3e50e-3229-46e8-969b-e023922fdbce-kube-api-access-p26mb\") pod \"node-resolver-vzgwx\" (UID: \"73f3e50e-3229-46e8-969b-e023922fdbce\") " pod="openshift-dns/node-resolver-vzgwx" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.009985 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-os-release\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.016518 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.026262 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.037570 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.046968 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.055633 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.064673 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.070845 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.084361 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.084568 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.084646 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.084721 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.084779 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:48Z","lastTransitionTime":"2025-11-22T07:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.086860 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.096125 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.109187 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.110585 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p26mb\" (UniqueName: \"kubernetes.io/projected/73f3e50e-3229-46e8-969b-e023922fdbce-kube-api-access-p26mb\") pod \"node-resolver-vzgwx\" (UID: \"73f3e50e-3229-46e8-969b-e023922fdbce\") " pod="openshift-dns/node-resolver-vzgwx" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.110639 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-os-release\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.110674 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/763619b4-b584-4089-bd56-96823e22e25e-cni-binary-copy\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.110706 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-host-run-netns\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.110739 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-host-var-lib-kubelet\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.110761 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-os-release\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.110772 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: 
\"kubernetes.io/configmap/763619b4-b584-4089-bd56-96823e22e25e-multus-daemon-config\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.110800 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-host-run-netns\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.110855 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-host-var-lib-cni-multus\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.110828 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-host-var-lib-cni-multus\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.110935 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-host-run-k8s-cni-cncf-io\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.110956 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-host-run-multus-certs\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.110985 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/73f3e50e-3229-46e8-969b-e023922fdbce-hosts-file\") pod \"node-resolver-vzgwx\" (UID: \"73f3e50e-3229-46e8-969b-e023922fdbce\") " pod="openshift-dns/node-resolver-vzgwx" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.110966 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-host-var-lib-kubelet\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111049 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/73f3e50e-3229-46e8-969b-e023922fdbce-hosts-file\") pod \"node-resolver-vzgwx\" (UID: \"73f3e50e-3229-46e8-969b-e023922fdbce\") " pod="openshift-dns/node-resolver-vzgwx" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111050 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-host-run-multus-certs\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc 
kubenswrapper[4929]: I1122 07:11:48.111005 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-host-run-k8s-cni-cncf-io\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111003 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-multus-cni-dir\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111137 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-multus-cni-dir\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111154 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-hostroot\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111245 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-hostroot\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111249 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-etc-kubernetes\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111308 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-etc-kubernetes\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111302 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwc8g\" (UniqueName: \"kubernetes.io/projected/763619b4-b584-4089-bd56-96823e22e25e-kube-api-access-zwc8g\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111347 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-multus-conf-dir\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111366 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-host-var-lib-cni-bin\") pod \"multus-bb6rk\" (UID: 
\"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111383 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-system-cni-dir\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111399 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-cnibin\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111415 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-multus-socket-dir-parent\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111448 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-host-var-lib-cni-bin\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111464 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-multus-socket-dir-parent\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111472 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-multus-conf-dir\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111505 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-cnibin\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111510 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/763619b4-b584-4089-bd56-96823e22e25e-system-cni-dir\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111595 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/763619b4-b584-4089-bd56-96823e22e25e-cni-binary-copy\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.111846 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: 
\"kubernetes.io/configmap/763619b4-b584-4089-bd56-96823e22e25e-multus-daemon-config\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.120632 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.129447 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p26mb\" (UniqueName: \"kubernetes.io/projected/73f3e50e-3229-46e8-969b-e023922fdbce-kube-api-access-p26mb\") pod \"node-resolver-vzgwx\" (UID: \"73f3e50e-3229-46e8-969b-e023922fdbce\") " pod="openshift-dns/node-resolver-vzgwx" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.130681 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwc8g\" (UniqueName: \"kubernetes.io/projected/763619b4-b584-4089-bd56-96823e22e25e-kube-api-access-zwc8g\") pod \"multus-bb6rk\" (UID: \"763619b4-b584-4089-bd56-96823e22e25e\") " pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.135540 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.147032 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.159755 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.168918 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.180485 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.186496 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.186543 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.186554 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.186572 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.186581 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:48Z","lastTransitionTime":"2025-11-22T07:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.256609 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-bb6rk" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.262942 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-vzgwx" Nov 22 07:11:48 crc kubenswrapper[4929]: W1122 07:11:48.269870 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod763619b4_b584_4089_bd56_96823e22e25e.slice/crio-cf2cf3463187ff034a73a163a9fd5099d3fd38de0b0e13c11c024239b1bf03e7 WatchSource:0}: Error finding container cf2cf3463187ff034a73a163a9fd5099d3fd38de0b0e13c11c024239b1bf03e7: Status 404 returned error can't find the container with id cf2cf3463187ff034a73a163a9fd5099d3fd38de0b0e13c11c024239b1bf03e7 Nov 22 07:11:48 crc kubenswrapper[4929]: E1122 07:11:48.272862 4929 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 22 07:11:48 crc kubenswrapper[4929]: container &Container{Name:kube-multus,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26,Command:[/bin/bash -ec --],Args:[MULTUS_DAEMON_OPT="" Nov 22 07:11:48 crc kubenswrapper[4929]: /entrypoint/cnibincopy.sh; exec /usr/src/multus-cni/bin/multus-daemon $MULTUS_DAEMON_OPT Nov 22 07:11:48 crc kubenswrapper[4929]: ],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:RHEL8_SOURCE_DIRECTORY,Value:/usr/src/multus-cni/rhel8/bin/,ValueFrom:nil,},EnvVar{Name:RHEL9_SOURCE_DIRECTORY,Value:/usr/src/multus-cni/rhel9/bin/,ValueFrom:nil,},EnvVar{Name:DEFAULT_SOURCE_DIRECTORY,Value:/usr/src/multus-cni/bin/,ValueFrom:nil,},EnvVar{Name:KUBERNETES_SERVICE_PORT,Value:6443,ValueFrom:nil,},EnvVar{Name:KUBERNETES_SERVICE_HOST,Value:api-int.crc.testing,ValueFrom:nil,},EnvVar{Name:MULTUS_NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:K8S_NODE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{68157440 0} {} 65Mi 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cni-binary-copy,ReadOnly:false,MountPath:/entrypoint,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:os-release,ReadOnly:false,MountPath:/host/etc/os-release,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:system-cni-dir,ReadOnly:false,MountPath:/host/etc/cni/net.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-cni-dir,ReadOnly:false,MountPath:/host/run/multus/cni/net.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cnibin,ReadOnly:false,MountPath:/host/opt/cni/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-socket-dir-parent,ReadOnly:false,MountPath:/host/run/multus,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-run-k8s-cni-cncf-io,ReadOnly:false,MountPath:/run/k8s.cni.cncf.io,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-run-netns,ReadOnly:false,MountPath:/run/netns,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-var-lib-cni-bin,ReadOnly:false,MountPath:/var/lib/cni/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-var-lib-cni-multus,ReadOnly:false,MountPath:/var/lib/cni/multus,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-var-lib-kubelet,ReadOnly:false,MountPath:/var/lib/kubelet,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:hostroot,ReadOnly:false,MountPath:/hostroot,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-conf-dir,ReadOnly:false,MountPath:/etc/cni/multus/net.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-daemon-config,ReadOnly:true,MountPath:/etc/cni/net.d/multus.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-run-multus-certs,ReadOnly:false,MountPath:/etc/cni/multus/certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:etc-kubernetes,ReadOnly:false,MountPath:/etc/kubernetes,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zwc8g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod multus-bb6rk_openshift-multus(763619b4-b584-4089-bd56-96823e22e25e): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Nov 22 07:11:48 crc kubenswrapper[4929]: > logger="UnhandledError" Nov 22 07:11:48 crc kubenswrapper[4929]: E1122 07:11:48.274390 4929 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-multus/multus-bb6rk" podUID="763619b4-b584-4089-bd56-96823e22e25e"
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.277360 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-dssfx"]
Nov 22 07:11:48 crc kubenswrapper[4929]: W1122 07:11:48.277950 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod73f3e50e_3229_46e8_969b_e023922fdbce.slice/crio-5738ef4aa4fce1d6bfd6452997e893958ee4a69abb5dad420d38933239b70760 WatchSource:0}: Error finding container 5738ef4aa4fce1d6bfd6452997e893958ee4a69abb5dad420d38933239b70760: Status 404 returned error can't find the container with id 5738ef4aa4fce1d6bfd6452997e893958ee4a69abb5dad420d38933239b70760
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.278148 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-dssfx"
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.279233 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-mtn8z"]
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.279794 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-mtn8z"
Nov 22 07:11:48 crc kubenswrapper[4929]: E1122 07:11:48.281341 4929 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Nov 22 07:11:48 crc kubenswrapper[4929]: container &Container{Name:dns-node-resolver,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,Command:[/bin/bash -c #!/bin/bash
Nov 22 07:11:48 crc kubenswrapper[4929]: set -uo pipefail
Nov 22 07:11:48 crc kubenswrapper[4929]:
Nov 22 07:11:48 crc kubenswrapper[4929]: trap 'jobs -p | xargs kill || true; wait; exit 0' TERM
Nov 22 07:11:48 crc kubenswrapper[4929]:
Nov 22 07:11:48 crc kubenswrapper[4929]: OPENSHIFT_MARKER="openshift-generated-node-resolver"
Nov 22 07:11:48 crc kubenswrapper[4929]: HOSTS_FILE="/etc/hosts"
Nov 22 07:11:48 crc kubenswrapper[4929]: TEMP_FILE="/etc/hosts.tmp"
Nov 22 07:11:48 crc kubenswrapper[4929]:
Nov 22 07:11:48 crc kubenswrapper[4929]: IFS=', ' read -r -a services <<< "${SERVICES}"
Nov 22 07:11:48 crc kubenswrapper[4929]:
Nov 22 07:11:48 crc kubenswrapper[4929]: # Make a temporary file with the old hosts file's attributes.
Nov 22 07:11:48 crc kubenswrapper[4929]: if ! cp -f --attributes-only "${HOSTS_FILE}" "${TEMP_FILE}"; then
Nov 22 07:11:48 crc kubenswrapper[4929]: echo "Failed to preserve hosts file. Exiting."
Nov 22 07:11:48 crc kubenswrapper[4929]: exit 1
Nov 22 07:11:48 crc kubenswrapper[4929]: fi
Nov 22 07:11:48 crc kubenswrapper[4929]:
Nov 22 07:11:48 crc kubenswrapper[4929]: while true; do
Nov 22 07:11:48 crc kubenswrapper[4929]: declare -A svc_ips
Nov 22 07:11:48 crc kubenswrapper[4929]: for svc in "${services[@]}"; do
Nov 22 07:11:48 crc kubenswrapper[4929]: # Fetch service IP from cluster dns if present. We make several tries
Nov 22 07:11:48 crc kubenswrapper[4929]: # to do it: IPv4, IPv6, IPv4 over TCP and IPv6 over TCP. The two last ones
Nov 22 07:11:48 crc kubenswrapper[4929]: # are for deployments with Kuryr on older OpenStack (OSP13) - those do not
Nov 22 07:11:48 crc kubenswrapper[4929]: # support UDP loadbalancers and require reaching DNS through TCP.
Nov 22 07:11:48 crc kubenswrapper[4929]: cmds=('dig -t A @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"'
Nov 22 07:11:48 crc kubenswrapper[4929]: 'dig -t AAAA @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"'
Nov 22 07:11:48 crc kubenswrapper[4929]: 'dig -t A +tcp +retry=0 @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"'
Nov 22 07:11:48 crc kubenswrapper[4929]: 'dig -t AAAA +tcp +retry=0 @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"')
Nov 22 07:11:48 crc kubenswrapper[4929]: for i in ${!cmds[*]}
Nov 22 07:11:48 crc kubenswrapper[4929]: do
Nov 22 07:11:48 crc kubenswrapper[4929]: ips=($(eval "${cmds[i]}"))
Nov 22 07:11:48 crc kubenswrapper[4929]: if [[ "$?" -eq 0 && "${#ips[@]}" -ne 0 ]]; then
Nov 22 07:11:48 crc kubenswrapper[4929]: svc_ips["${svc}"]="${ips[@]}"
Nov 22 07:11:48 crc kubenswrapper[4929]: break
Nov 22 07:11:48 crc kubenswrapper[4929]: fi
Nov 22 07:11:48 crc kubenswrapper[4929]: done
Nov 22 07:11:48 crc kubenswrapper[4929]: done
Nov 22 07:11:48 crc kubenswrapper[4929]:
Nov 22 07:11:48 crc kubenswrapper[4929]: # Update /etc/hosts only if we get valid service IPs
Nov 22 07:11:48 crc kubenswrapper[4929]: # We will not update /etc/hosts when there is coredns service outage or api unavailability
Nov 22 07:11:48 crc kubenswrapper[4929]: # Stale entries could exist in /etc/hosts if the service is deleted
Nov 22 07:11:48 crc kubenswrapper[4929]: if [[ -n "${svc_ips[*]-}" ]]; then
Nov 22 07:11:48 crc kubenswrapper[4929]: # Build a new hosts file from /etc/hosts with our custom entries filtered out
Nov 22 07:11:48 crc kubenswrapper[4929]: if ! sed --silent "/# ${OPENSHIFT_MARKER}/d; w ${TEMP_FILE}" "${HOSTS_FILE}"; then
Nov 22 07:11:48 crc kubenswrapper[4929]: # Only continue rebuilding the hosts entries if its original content is preserved
Nov 22 07:11:48 crc kubenswrapper[4929]: sleep 60 & wait
Nov 22 07:11:48 crc kubenswrapper[4929]: continue
Nov 22 07:11:48 crc kubenswrapper[4929]: fi
Nov 22 07:11:48 crc kubenswrapper[4929]:
Nov 22 07:11:48 crc kubenswrapper[4929]: # Append resolver entries for services
Nov 22 07:11:48 crc kubenswrapper[4929]: rc=0
Nov 22 07:11:48 crc kubenswrapper[4929]: for svc in "${!svc_ips[@]}"; do
Nov 22 07:11:48 crc kubenswrapper[4929]: for ip in ${svc_ips[${svc}]}; do
Nov 22 07:11:48 crc kubenswrapper[4929]: echo "${ip} ${svc} ${svc}.${CLUSTER_DOMAIN} # ${OPENSHIFT_MARKER}" >> "${TEMP_FILE}" || rc=$?
Nov 22 07:11:48 crc kubenswrapper[4929]: done
Nov 22 07:11:48 crc kubenswrapper[4929]: done
Nov 22 07:11:48 crc kubenswrapper[4929]: if [[ $rc -ne 0 ]]; then
Nov 22 07:11:48 crc kubenswrapper[4929]: sleep 60 & wait
Nov 22 07:11:48 crc kubenswrapper[4929]: continue
Nov 22 07:11:48 crc kubenswrapper[4929]: fi
Nov 22 07:11:48 crc kubenswrapper[4929]:
Nov 22 07:11:48 crc kubenswrapper[4929]:
Nov 22 07:11:48 crc kubenswrapper[4929]: # TODO: Update /etc/hosts atomically to avoid any inconsistent behavior
Nov 22 07:11:48 crc kubenswrapper[4929]: # Replace /etc/hosts with our modified version if needed
Nov 22 07:11:48 crc kubenswrapper[4929]: cmp "${TEMP_FILE}" "${HOSTS_FILE}" || cp -f "${TEMP_FILE}" "${HOSTS_FILE}"
Nov 22 07:11:48 crc kubenswrapper[4929]: # TEMP_FILE is not removed to avoid file create/delete and attributes copy churn
Nov 22 07:11:48 crc kubenswrapper[4929]: fi
Nov 22 07:11:48 crc kubenswrapper[4929]: sleep 60 & wait
Nov 22 07:11:48 crc kubenswrapper[4929]: unset svc_ips
Nov 22 07:11:48 crc kubenswrapper[4929]: done
Nov 22 07:11:48 crc kubenswrapper[4929]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:SERVICES,Value:image-registry.openshift-image-registry.svc,ValueFrom:nil,},EnvVar{Name:NAMESERVER,Value:10.217.4.10,ValueFrom:nil,},EnvVar{Name:CLUSTER_DOMAIN,Value:cluster.local,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{22020096 0} {} 21Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:hosts-file,ReadOnly:false,MountPath:/etc/hosts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-p26mb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod node-resolver-vzgwx_openshift-dns(73f3e50e-3229-46e8-969b-e023922fdbce): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars
Nov 22 07:11:48 crc kubenswrapper[4929]: > logger="UnhandledError"
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.281560 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.281663 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.281803 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.281927 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.282097
4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Nov 22 07:11:48 crc kubenswrapper[4929]: E1122 07:11:48.282485 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dns-node-resolver\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-dns/node-resolver-vzgwx" podUID="73f3e50e-3229-46e8-969b-e023922fdbce"
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.282643 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.282663 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.285331 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-vkn7m"]
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.286316 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m"
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.287683 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.288119 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.288125 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.288559 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.288587 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.288599 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.288615 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.288628 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:48Z","lastTransitionTime":"2025-11-22T07:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.288615 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.289821 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.291280 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.292634 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.298331 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.307844 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.318509 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.325825 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.340568 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.350019 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.359979 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.369457 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cer
t-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.378125 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.386009 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.390857 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.390892 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.390905 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.390925 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.390939 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:48Z","lastTransitionTime":"2025-11-22T07:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.396259 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.406694 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.413461 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-etc-openvswitch\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.413517 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/78d8d828-48aa-4499-a4af-54e0dd754349-system-cni-dir\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 
07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.413552 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/78d8d828-48aa-4499-a4af-54e0dd754349-tuning-conf-dir\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.413585 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/470531cb-120c-48d9-80e1-adf074cf3055-proxy-tls\") pod \"machine-config-daemon-dssfx\" (UID: \"470531cb-120c-48d9-80e1-adf074cf3055\") " pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.413617 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-run-netns\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.413649 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/77273e11-9bf5-4480-bf99-266ae3f1ed7a-env-overrides\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.413682 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/78d8d828-48aa-4499-a4af-54e0dd754349-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.413711 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-slash\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.413739 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4rhl\" (UniqueName: \"kubernetes.io/projected/77273e11-9bf5-4480-bf99-266ae3f1ed7a-kube-api-access-j4rhl\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.413788 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-cni-bin\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.413823 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-kubelet\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.413856 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-run-openvswitch\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.413885 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/77273e11-9bf5-4480-bf99-266ae3f1ed7a-ovn-node-metrics-cert\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.414001 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-cni-netd\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.414070 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-systemd-units\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.414090 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-run-ovn-kubernetes\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.414163 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/78d8d828-48aa-4499-a4af-54e0dd754349-cni-binary-copy\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.414204 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/78d8d828-48aa-4499-a4af-54e0dd754349-os-release\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.414264 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clts9\" (UniqueName: \"kubernetes.io/projected/78d8d828-48aa-4499-a4af-54e0dd754349-kube-api-access-clts9\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 
07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.414297 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-run-systemd\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.414333 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-var-lib-openvswitch\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.414363 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-log-socket\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.414398 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.414432 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/77273e11-9bf5-4480-bf99-266ae3f1ed7a-ovnkube-config\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.414464 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/78d8d828-48aa-4499-a4af-54e0dd754349-cnibin\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.414506 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/470531cb-120c-48d9-80e1-adf074cf3055-mcd-auth-proxy-config\") pod \"machine-config-daemon-dssfx\" (UID: \"470531cb-120c-48d9-80e1-adf074cf3055\") " pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.414565 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/470531cb-120c-48d9-80e1-adf074cf3055-rootfs\") pod \"machine-config-daemon-dssfx\" (UID: \"470531cb-120c-48d9-80e1-adf074cf3055\") " pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.414599 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97j5q\" (UniqueName: 
\"kubernetes.io/projected/470531cb-120c-48d9-80e1-adf074cf3055-kube-api-access-97j5q\") pod \"machine-config-daemon-dssfx\" (UID: \"470531cb-120c-48d9-80e1-adf074cf3055\") " pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.414630 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-run-ovn\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.414657 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-node-log\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.414685 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/77273e11-9bf5-4480-bf99-266ae3f1ed7a-ovnkube-script-lib\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.418810 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.428193 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.436382 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.450772 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.467988 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.481683 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.493544 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.493581 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.493594 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.493609 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.493619 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:48Z","lastTransitionTime":"2025-11-22T07:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.506971 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\
\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.513701 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515117 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-cni-bin\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515145 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-kubelet\") pod 
\"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515163 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-run-openvswitch\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515181 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/77273e11-9bf5-4480-bf99-266ae3f1ed7a-ovn-node-metrics-cert\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515202 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-cni-netd\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515242 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-systemd-units\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515260 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-run-ovn-kubernetes\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515267 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-run-openvswitch\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515296 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-kubelet\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515267 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-cni-bin\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515281 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/78d8d828-48aa-4499-a4af-54e0dd754349-cni-binary-copy\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " 
pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515302 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-cni-netd\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515342 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-run-ovn-kubernetes\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515351 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/78d8d828-48aa-4499-a4af-54e0dd754349-os-release\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515322 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-systemd-units\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515386 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clts9\" (UniqueName: \"kubernetes.io/projected/78d8d828-48aa-4499-a4af-54e0dd754349-kube-api-access-clts9\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515409 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-run-systemd\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515422 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/78d8d828-48aa-4499-a4af-54e0dd754349-os-release\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515428 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-var-lib-openvswitch\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515449 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-run-systemd\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515451 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-log-socket\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515474 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-log-socket\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515486 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515506 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/77273e11-9bf5-4480-bf99-266ae3f1ed7a-ovnkube-config\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515488 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-var-lib-openvswitch\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515525 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/78d8d828-48aa-4499-a4af-54e0dd754349-cnibin\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515507 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515544 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/470531cb-120c-48d9-80e1-adf074cf3055-mcd-auth-proxy-config\") pod \"machine-config-daemon-dssfx\" (UID: \"470531cb-120c-48d9-80e1-adf074cf3055\") " pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515561 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/77273e11-9bf5-4480-bf99-266ae3f1ed7a-ovnkube-script-lib\") pod \"ovnkube-node-vkn7m\" (UID: 
\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515589 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/78d8d828-48aa-4499-a4af-54e0dd754349-cnibin\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515594 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/470531cb-120c-48d9-80e1-adf074cf3055-rootfs\") pod \"machine-config-daemon-dssfx\" (UID: \"470531cb-120c-48d9-80e1-adf074cf3055\") " pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515613 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/470531cb-120c-48d9-80e1-adf074cf3055-rootfs\") pod \"machine-config-daemon-dssfx\" (UID: \"470531cb-120c-48d9-80e1-adf074cf3055\") " pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515634 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97j5q\" (UniqueName: \"kubernetes.io/projected/470531cb-120c-48d9-80e1-adf074cf3055-kube-api-access-97j5q\") pod \"machine-config-daemon-dssfx\" (UID: \"470531cb-120c-48d9-80e1-adf074cf3055\") " pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515656 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-run-ovn\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515671 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-node-log\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515689 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-etc-openvswitch\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515705 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-run-ovn\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515730 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-node-log\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 
07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515761 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/78d8d828-48aa-4499-a4af-54e0dd754349-system-cni-dir\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515795 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/78d8d828-48aa-4499-a4af-54e0dd754349-tuning-conf-dir\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515798 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-etc-openvswitch\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515822 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/470531cb-120c-48d9-80e1-adf074cf3055-proxy-tls\") pod \"machine-config-daemon-dssfx\" (UID: \"470531cb-120c-48d9-80e1-adf074cf3055\") " pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515842 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-run-netns\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515860 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/77273e11-9bf5-4480-bf99-266ae3f1ed7a-env-overrides\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515887 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/78d8d828-48aa-4499-a4af-54e0dd754349-cni-binary-copy\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515889 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/78d8d828-48aa-4499-a4af-54e0dd754349-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515922 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-slash\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515942 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4rhl\" (UniqueName: \"kubernetes.io/projected/77273e11-9bf5-4480-bf99-266ae3f1ed7a-kube-api-access-j4rhl\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515985 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-run-netns\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.516136 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-slash\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.515842 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/78d8d828-48aa-4499-a4af-54e0dd754349-system-cni-dir\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.516463 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/470531cb-120c-48d9-80e1-adf074cf3055-mcd-auth-proxy-config\") pod \"machine-config-daemon-dssfx\" (UID: \"470531cb-120c-48d9-80e1-adf074cf3055\") " pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.516489 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/77273e11-9bf5-4480-bf99-266ae3f1ed7a-ovnkube-config\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.516509 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/77273e11-9bf5-4480-bf99-266ae3f1ed7a-env-overrides\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.516566 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/78d8d828-48aa-4499-a4af-54e0dd754349-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.516589 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/78d8d828-48aa-4499-a4af-54e0dd754349-tuning-conf-dir\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " 
pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.516600 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/77273e11-9bf5-4480-bf99-266ae3f1ed7a-ovnkube-script-lib\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.519388 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/470531cb-120c-48d9-80e1-adf074cf3055-proxy-tls\") pod \"machine-config-daemon-dssfx\" (UID: \"470531cb-120c-48d9-80e1-adf074cf3055\") " pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.520114 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/77273e11-9bf5-4480-bf99-266ae3f1ed7a-ovn-node-metrics-cert\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.525873 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.533331 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4rhl\" (UniqueName: \"kubernetes.io/projected/77273e11-9bf5-4480-bf99-266ae3f1ed7a-kube-api-access-j4rhl\") pod \"ovnkube-node-vkn7m\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.534006 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-97j5q\" (UniqueName: \"kubernetes.io/projected/470531cb-120c-48d9-80e1-adf074cf3055-kube-api-access-97j5q\") pod \"machine-config-daemon-dssfx\" (UID: \"470531cb-120c-48d9-80e1-adf074cf3055\") " pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.535604 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-clts9\" (UniqueName: \"kubernetes.io/projected/78d8d828-48aa-4499-a4af-54e0dd754349-kube-api-access-clts9\") pod \"multus-additional-cni-plugins-mtn8z\" (UID: \"78d8d828-48aa-4499-a4af-54e0dd754349\") " pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.542999 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\
\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\"
:\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.550560 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.558104 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.566105 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.573509 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.593717 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.598330 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.598367 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.598381 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.598399 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.598412 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:48Z","lastTransitionTime":"2025-11-22T07:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.600600 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.605787 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:11:48 crc kubenswrapper[4929]: E1122 07:11:48.609583 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:machine-config-daemon,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a,Command:[/usr/bin/machine-config-daemon],Args:[start --payload-version=4.18.1],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:health,HostPort:8798,ContainerPort:8798,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:rootfs,ReadOnly:false,MountPath:/rootfs,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-97j5q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/health,Port:{0 8798 },Host:127.0.0.1,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:120,TimeoutSeconds:1,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Nov 22 07:11:48 crc kubenswrapper[4929]: E1122 07:11:48.613164 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09,Command:[],Args:[--secure-listen-address=0.0.0.0:9001 --config-file=/etc/kube-rbac-proxy/config-file.yaml --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 --tls-min-version=VersionTLS12 --upstream=http://127.0.0.1:8797 --logtostderr=true --tls-cert-file=/etc/tls/private/tls.crt 
--tls-private-key-file=/etc/tls/private/tls.key],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:9001,ContainerPort:9001,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:proxy-tls,ReadOnly:false,MountPath:/etc/tls/private,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:mcd-auth-proxy-config,ReadOnly:false,MountPath:/etc/kube-rbac-proxy,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-97j5q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Nov 22 07:11:48 crc kubenswrapper[4929]: E1122 07:11:48.614461 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"machine-config-daemon\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"]" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:11:48 crc kubenswrapper[4929]: W1122 07:11:48.617514 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod78d8d828_48aa_4499_a4af_54e0dd754349.slice/crio-0f38681fcc25a9c01a5e09af5bc4c93bd91c4afe69314b18263f897c4d8ae549 WatchSource:0}: Error finding container 0f38681fcc25a9c01a5e09af5bc4c93bd91c4afe69314b18263f897c4d8ae549: Status 404 returned error can't find the container with id 0f38681fcc25a9c01a5e09af5bc4c93bd91c4afe69314b18263f897c4d8ae549 Nov 22 07:11:48 crc kubenswrapper[4929]: W1122 07:11:48.618677 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod77273e11_9bf5_4480_bf99_266ae3f1ed7a.slice/crio-97170fcf4b1a959dfcdd406ec04c18cb99631279bef967842a66051a4dff40e8 WatchSource:0}: Error finding container 97170fcf4b1a959dfcdd406ec04c18cb99631279bef967842a66051a4dff40e8: Status 404 returned error can't find the container with id 97170fcf4b1a959dfcdd406ec04c18cb99631279bef967842a66051a4dff40e8 Nov 22 07:11:48 crc kubenswrapper[4929]: E1122 07:11:48.619338 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:egress-router-binary-copy,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c,Command:[/entrypoint/cnibincopy.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:RHEL8_SOURCE_DIRECTORY,Value:/usr/src/egress-router-cni/rhel8/bin/,ValueFrom:nil,},EnvVar{Name:RHEL9_SOURCE_DIRECTORY,Value:/usr/src/egress-router-cni/rhel9/bin/,ValueFrom:nil,},EnvVar{Name:DEFAULT_SOURCE_DIRECTORY,Value:/usr/src/egress-router-cni/bin/,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cni-binary-copy,ReadOnly:false,MountPath:/entrypoint,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cnibin,ReadOnly:false,MountPath:/host/opt/cni/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:os-release,ReadOnly:true,MountPath:/host/etc/os-release,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-clts9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod multus-additional-cni-plugins-mtn8z_openshift-multus(78d8d828-48aa-4499-a4af-54e0dd754349): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Nov 22 07:11:48 crc kubenswrapper[4929]: E1122 07:11:48.621451 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"egress-router-binary-copy\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" podUID="78d8d828-48aa-4499-a4af-54e0dd754349" Nov 22 07:11:48 crc kubenswrapper[4929]: E1122 07:11:48.621667 4929 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 22 07:11:48 crc kubenswrapper[4929]: init container &Container{Name:kubecfg-setup,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c cat << EOF > /etc/ovn/kubeconfig Nov 22 07:11:48 crc kubenswrapper[4929]: apiVersion: v1 Nov 22 07:11:48 crc kubenswrapper[4929]: clusters: Nov 22 07:11:48 crc kubenswrapper[4929]: - cluster: Nov 22 07:11:48 crc kubenswrapper[4929]: certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt Nov 22 07:11:48 crc kubenswrapper[4929]: server: https://api-int.crc.testing:6443 Nov 22 07:11:48 crc kubenswrapper[4929]: name: default-cluster Nov 22 07:11:48 crc kubenswrapper[4929]: contexts: Nov 22 07:11:48 crc kubenswrapper[4929]: - context: Nov 22 07:11:48 crc kubenswrapper[4929]: cluster: default-cluster Nov 22 07:11:48 crc kubenswrapper[4929]: namespace: default Nov 22 07:11:48 crc kubenswrapper[4929]: user: default-auth Nov 22 07:11:48 crc kubenswrapper[4929]: name: default-context Nov 22 07:11:48 crc kubenswrapper[4929]: current-context: default-context Nov 22 07:11:48 crc kubenswrapper[4929]: kind: Config 
Nov 22 07:11:48 crc kubenswrapper[4929]: preferences: {} Nov 22 07:11:48 crc kubenswrapper[4929]: users: Nov 22 07:11:48 crc kubenswrapper[4929]: - name: default-auth Nov 22 07:11:48 crc kubenswrapper[4929]: user: Nov 22 07:11:48 crc kubenswrapper[4929]: client-certificate: /etc/ovn/ovnkube-node-certs/ovnkube-client-current.pem Nov 22 07:11:48 crc kubenswrapper[4929]: client-key: /etc/ovn/ovnkube-node-certs/ovnkube-client-current.pem Nov 22 07:11:48 crc kubenswrapper[4929]: EOF Nov 22 07:11:48 crc kubenswrapper[4929]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-openvswitch,ReadOnly:false,MountPath:/etc/ovn/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-j4rhl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovnkube-node-vkn7m_openshift-ovn-kubernetes(77273e11-9bf5-4480-bf99-266ae3f1ed7a): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Nov 22 07:11:48 crc kubenswrapper[4929]: > logger="UnhandledError" Nov 22 07:11:48 crc kubenswrapper[4929]: E1122 07:11:48.623561 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kubecfg-setup\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.700938 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.701132 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.701374 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.701530 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.701668 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:48Z","lastTransitionTime":"2025-11-22T07:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.804984 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.805018 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.805030 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.805050 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.805063 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:48Z","lastTransitionTime":"2025-11-22T07:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.907480 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.907521 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.907531 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.907547 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.907557 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:48Z","lastTransitionTime":"2025-11-22T07:11:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.947083 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.947083 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:11:48 crc kubenswrapper[4929]: E1122 07:11:48.947192 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:11:48 crc kubenswrapper[4929]: E1122 07:11:48.947273 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:11:48 crc kubenswrapper[4929]: I1122 07:11:48.947097 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:11:48 crc kubenswrapper[4929]: E1122 07:11:48.947355 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.009987 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.010027 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.010039 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.010055 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.010072 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:49Z","lastTransitionTime":"2025-11-22T07:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.112276 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.112307 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.112315 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.112327 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.112336 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:49Z","lastTransitionTime":"2025-11-22T07:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.189176 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" event={"ID":"78d8d828-48aa-4499-a4af-54e0dd754349","Type":"ContainerStarted","Data":"0f38681fcc25a9c01a5e09af5bc4c93bd91c4afe69314b18263f897c4d8ae549"} Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.189823 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerStarted","Data":"97170fcf4b1a959dfcdd406ec04c18cb99631279bef967842a66051a4dff40e8"} Nov 22 07:11:49 crc kubenswrapper[4929]: E1122 07:11:49.191181 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:egress-router-binary-copy,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c,Command:[/entrypoint/cnibincopy.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:RHEL8_SOURCE_DIRECTORY,Value:/usr/src/egress-router-cni/rhel8/bin/,ValueFrom:nil,},EnvVar{Name:RHEL9_SOURCE_DIRECTORY,Value:/usr/src/egress-router-cni/rhel9/bin/,ValueFrom:nil,},EnvVar{Name:DEFAULT_SOURCE_DIRECTORY,Value:/usr/src/egress-router-cni/bin/,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cni-binary-copy,ReadOnly:false,MountPath:/entrypoint,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cnibin,ReadOnly:false,MountPath:/host/opt/cni/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:os-release,ReadOnly:true,MountPath:/host/etc/os-release,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-clts9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod multus-additional-cni-plugins-mtn8z_openshift-multus(78d8d828-48aa-4499-a4af-54e0dd754349): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.191487 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-vzgwx" event={"ID":"73f3e50e-3229-46e8-969b-e023922fdbce","Type":"ContainerStarted","Data":"5738ef4aa4fce1d6bfd6452997e893958ee4a69abb5dad420d38933239b70760"} Nov 22 07:11:49 crc kubenswrapper[4929]: E1122 07:11:49.191983 4929 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 22 07:11:49 crc kubenswrapper[4929]: init container &Container{Name:kubecfg-setup,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c cat << EOF > /etc/ovn/kubeconfig Nov 22 07:11:49 crc kubenswrapper[4929]: apiVersion: v1 Nov 22 07:11:49 crc kubenswrapper[4929]: clusters: Nov 22 07:11:49 crc kubenswrapper[4929]: - cluster: Nov 22 07:11:49 crc kubenswrapper[4929]: certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt Nov 22 07:11:49 crc kubenswrapper[4929]: server: https://api-int.crc.testing:6443 Nov 22 07:11:49 crc kubenswrapper[4929]: name: default-cluster Nov 22 07:11:49 crc kubenswrapper[4929]: contexts: Nov 22 07:11:49 crc kubenswrapper[4929]: - context: Nov 22 07:11:49 crc kubenswrapper[4929]: cluster: default-cluster Nov 22 07:11:49 crc kubenswrapper[4929]: namespace: default Nov 22 07:11:49 crc kubenswrapper[4929]: user: default-auth Nov 22 07:11:49 crc kubenswrapper[4929]: name: default-context Nov 22 07:11:49 crc kubenswrapper[4929]: current-context: default-context Nov 22 07:11:49 crc kubenswrapper[4929]: kind: Config Nov 22 07:11:49 crc kubenswrapper[4929]: preferences: {} Nov 22 07:11:49 crc 
kubenswrapper[4929]: users: Nov 22 07:11:49 crc kubenswrapper[4929]: - name: default-auth Nov 22 07:11:49 crc kubenswrapper[4929]: user: Nov 22 07:11:49 crc kubenswrapper[4929]: client-certificate: /etc/ovn/ovnkube-node-certs/ovnkube-client-current.pem Nov 22 07:11:49 crc kubenswrapper[4929]: client-key: /etc/ovn/ovnkube-node-certs/ovnkube-client-current.pem Nov 22 07:11:49 crc kubenswrapper[4929]: EOF Nov 22 07:11:49 crc kubenswrapper[4929]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-openvswitch,ReadOnly:false,MountPath:/etc/ovn/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-j4rhl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovnkube-node-vkn7m_openshift-ovn-kubernetes(77273e11-9bf5-4480-bf99-266ae3f1ed7a): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Nov 22 07:11:49 crc kubenswrapper[4929]: > logger="UnhandledError" Nov 22 07:11:49 crc kubenswrapper[4929]: E1122 07:11:49.192346 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"egress-router-binary-copy\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" podUID="78d8d828-48aa-4499-a4af-54e0dd754349" Nov 22 07:11:49 crc kubenswrapper[4929]: E1122 07:11:49.193081 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kubecfg-setup\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.193378 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bb6rk" event={"ID":"763619b4-b584-4089-bd56-96823e22e25e","Type":"ContainerStarted","Data":"cf2cf3463187ff034a73a163a9fd5099d3fd38de0b0e13c11c024239b1bf03e7"} Nov 22 07:11:49 crc kubenswrapper[4929]: E1122 07:11:49.193389 4929 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 22 07:11:49 crc kubenswrapper[4929]: container &Container{Name:dns-node-resolver,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,Command:[/bin/bash -c #!/bin/bash Nov 22 07:11:49 crc kubenswrapper[4929]: set -uo pipefail Nov 22 07:11:49 crc kubenswrapper[4929]: Nov 22 07:11:49 crc kubenswrapper[4929]: trap 'jobs -p | xargs kill || true; wait; exit 0' TERM Nov 22 07:11:49 crc kubenswrapper[4929]: Nov 22 07:11:49 crc kubenswrapper[4929]: OPENSHIFT_MARKER="openshift-generated-node-resolver" Nov 22 07:11:49 crc kubenswrapper[4929]: HOSTS_FILE="/etc/hosts" Nov 22 07:11:49 crc kubenswrapper[4929]: TEMP_FILE="/etc/hosts.tmp" Nov 22 07:11:49 crc kubenswrapper[4929]: Nov 22 
07:11:49 crc kubenswrapper[4929]: IFS=', ' read -r -a services <<< "${SERVICES}" Nov 22 07:11:49 crc kubenswrapper[4929]: Nov 22 07:11:49 crc kubenswrapper[4929]: # Make a temporary file with the old hosts file's attributes. Nov 22 07:11:49 crc kubenswrapper[4929]: if ! cp -f --attributes-only "${HOSTS_FILE}" "${TEMP_FILE}"; then Nov 22 07:11:49 crc kubenswrapper[4929]: echo "Failed to preserve hosts file. Exiting." Nov 22 07:11:49 crc kubenswrapper[4929]: exit 1 Nov 22 07:11:49 crc kubenswrapper[4929]: fi Nov 22 07:11:49 crc kubenswrapper[4929]: Nov 22 07:11:49 crc kubenswrapper[4929]: while true; do Nov 22 07:11:49 crc kubenswrapper[4929]: declare -A svc_ips Nov 22 07:11:49 crc kubenswrapper[4929]: for svc in "${services[@]}"; do Nov 22 07:11:49 crc kubenswrapper[4929]: # Fetch service IP from cluster dns if present. We make several tries Nov 22 07:11:49 crc kubenswrapper[4929]: # to do it: IPv4, IPv6, IPv4 over TCP and IPv6 over TCP. The two last ones Nov 22 07:11:49 crc kubenswrapper[4929]: # are for deployments with Kuryr on older OpenStack (OSP13) - those do not Nov 22 07:11:49 crc kubenswrapper[4929]: # support UDP loadbalancers and require reaching DNS through TCP. Nov 22 07:11:49 crc kubenswrapper[4929]: cmds=('dig -t A @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"' Nov 22 07:11:49 crc kubenswrapper[4929]: 'dig -t AAAA @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"' Nov 22 07:11:49 crc kubenswrapper[4929]: 'dig -t A +tcp +retry=0 @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"' Nov 22 07:11:49 crc kubenswrapper[4929]: 'dig -t AAAA +tcp +retry=0 @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"|grep -v "^;"') Nov 22 07:11:49 crc kubenswrapper[4929]: for i in ${!cmds[*]} Nov 22 07:11:49 crc kubenswrapper[4929]: do Nov 22 07:11:49 crc kubenswrapper[4929]: ips=($(eval "${cmds[i]}")) Nov 22 07:11:49 crc kubenswrapper[4929]: if [[ "$?" -eq 0 && "${#ips[@]}" -ne 0 ]]; then Nov 22 07:11:49 crc kubenswrapper[4929]: svc_ips["${svc}"]="${ips[@]}" Nov 22 07:11:49 crc kubenswrapper[4929]: break Nov 22 07:11:49 crc kubenswrapper[4929]: fi Nov 22 07:11:49 crc kubenswrapper[4929]: done Nov 22 07:11:49 crc kubenswrapper[4929]: done Nov 22 07:11:49 crc kubenswrapper[4929]: Nov 22 07:11:49 crc kubenswrapper[4929]: # Update /etc/hosts only if we get valid service IPs Nov 22 07:11:49 crc kubenswrapper[4929]: # We will not update /etc/hosts when there is coredns service outage or api unavailability Nov 22 07:11:49 crc kubenswrapper[4929]: # Stale entries could exist in /etc/hosts if the service is deleted Nov 22 07:11:49 crc kubenswrapper[4929]: if [[ -n "${svc_ips[*]-}" ]]; then Nov 22 07:11:49 crc kubenswrapper[4929]: # Build a new hosts file from /etc/hosts with our custom entries filtered out Nov 22 07:11:49 crc kubenswrapper[4929]: if ! 
sed --silent "/# ${OPENSHIFT_MARKER}/d; w ${TEMP_FILE}" "${HOSTS_FILE}"; then Nov 22 07:11:49 crc kubenswrapper[4929]: # Only continue rebuilding the hosts entries if its original content is preserved Nov 22 07:11:49 crc kubenswrapper[4929]: sleep 60 & wait Nov 22 07:11:49 crc kubenswrapper[4929]: continue Nov 22 07:11:49 crc kubenswrapper[4929]: fi Nov 22 07:11:49 crc kubenswrapper[4929]: Nov 22 07:11:49 crc kubenswrapper[4929]: # Append resolver entries for services Nov 22 07:11:49 crc kubenswrapper[4929]: rc=0 Nov 22 07:11:49 crc kubenswrapper[4929]: for svc in "${!svc_ips[@]}"; do Nov 22 07:11:49 crc kubenswrapper[4929]: for ip in ${svc_ips[${svc}]}; do Nov 22 07:11:49 crc kubenswrapper[4929]: echo "${ip} ${svc} ${svc}.${CLUSTER_DOMAIN} # ${OPENSHIFT_MARKER}" >> "${TEMP_FILE}" || rc=$? Nov 22 07:11:49 crc kubenswrapper[4929]: done Nov 22 07:11:49 crc kubenswrapper[4929]: done Nov 22 07:11:49 crc kubenswrapper[4929]: if [[ $rc -ne 0 ]]; then Nov 22 07:11:49 crc kubenswrapper[4929]: sleep 60 & wait Nov 22 07:11:49 crc kubenswrapper[4929]: continue Nov 22 07:11:49 crc kubenswrapper[4929]: fi Nov 22 07:11:49 crc kubenswrapper[4929]: Nov 22 07:11:49 crc kubenswrapper[4929]: Nov 22 07:11:49 crc kubenswrapper[4929]: # TODO: Update /etc/hosts atomically to avoid any inconsistent behavior Nov 22 07:11:49 crc kubenswrapper[4929]: # Replace /etc/hosts with our modified version if needed Nov 22 07:11:49 crc kubenswrapper[4929]: cmp "${TEMP_FILE}" "${HOSTS_FILE}" || cp -f "${TEMP_FILE}" "${HOSTS_FILE}" Nov 22 07:11:49 crc kubenswrapper[4929]: # TEMP_FILE is not removed to avoid file create/delete and attributes copy churn Nov 22 07:11:49 crc kubenswrapper[4929]: fi Nov 22 07:11:49 crc kubenswrapper[4929]: sleep 60 & wait Nov 22 07:11:49 crc kubenswrapper[4929]: unset svc_ips Nov 22 07:11:49 crc kubenswrapper[4929]: done Nov 22 07:11:49 crc kubenswrapper[4929]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:SERVICES,Value:image-registry.openshift-image-registry.svc,ValueFrom:nil,},EnvVar{Name:NAMESERVER,Value:10.217.4.10,ValueFrom:nil,},EnvVar{Name:CLUSTER_DOMAIN,Value:cluster.local,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{22020096 0} {} 21Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:hosts-file,ReadOnly:false,MountPath:/etc/hosts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-p26mb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod node-resolver-vzgwx_openshift-dns(73f3e50e-3229-46e8-969b-e023922fdbce): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Nov 22 07:11:49 crc kubenswrapper[4929]: > logger="UnhandledError" Nov 22 07:11:49 crc 
kubenswrapper[4929]: E1122 07:11:49.194715 4929 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 22 07:11:49 crc kubenswrapper[4929]: container &Container{Name:kube-multus,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26,Command:[/bin/bash -ec --],Args:[MULTUS_DAEMON_OPT="" Nov 22 07:11:49 crc kubenswrapper[4929]: /entrypoint/cnibincopy.sh; exec /usr/src/multus-cni/bin/multus-daemon $MULTUS_DAEMON_OPT Nov 22 07:11:49 crc kubenswrapper[4929]: ],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:RHEL8_SOURCE_DIRECTORY,Value:/usr/src/multus-cni/rhel8/bin/,ValueFrom:nil,},EnvVar{Name:RHEL9_SOURCE_DIRECTORY,Value:/usr/src/multus-cni/rhel9/bin/,ValueFrom:nil,},EnvVar{Name:DEFAULT_SOURCE_DIRECTORY,Value:/usr/src/multus-cni/bin/,ValueFrom:nil,},EnvVar{Name:KUBERNETES_SERVICE_PORT,Value:6443,ValueFrom:nil,},EnvVar{Name:KUBERNETES_SERVICE_HOST,Value:api-int.crc.testing,ValueFrom:nil,},EnvVar{Name:MULTUS_NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:K8S_NODE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{68157440 0} {} 65Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cni-binary-copy,ReadOnly:false,MountPath:/entrypoint,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:os-release,ReadOnly:false,MountPath:/host/etc/os-release,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:system-cni-dir,ReadOnly:false,MountPath:/host/etc/cni/net.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-cni-dir,ReadOnly:false,MountPath:/host/run/multus/cni/net.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cnibin,ReadOnly:false,MountPath:/host/opt/cni/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-socket-dir-parent,ReadOnly:false,MountPath:/host/run/multus,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-run-k8s-cni-cncf-io,ReadOnly:false,MountPath:/run/k8s.cni.cncf.io,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-run-netns,ReadOnly:false,MountPath:/run/netns,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-var-lib-cni-bin,ReadOnly:false,MountPath:/var/lib/cni/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-var-lib-cni-multus,ReadOnly:false,MountPath:/var/lib/cni/multus,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-var-lib-kubelet,ReadOnly:false,MountPath:/var/lib/kubelet,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:hostroot,ReadOnly:false,MountPath:/hostroot,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-conf-dir,ReadOnly:false,MountPath:/etc/cni/multus/net.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:multus-daemon-config,ReadOnly:true,MountPath:/etc/cni
/net.d/multus.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-run-multus-certs,ReadOnly:false,MountPath:/etc/cni/multus/certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:etc-kubernetes,ReadOnly:false,MountPath:/etc/kubernetes,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zwc8g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod multus-bb6rk_openshift-multus(763619b4-b584-4089-bd56-96823e22e25e): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Nov 22 07:11:49 crc kubenswrapper[4929]: > logger="UnhandledError" Nov 22 07:11:49 crc kubenswrapper[4929]: E1122 07:11:49.195013 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dns-node-resolver\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-dns/node-resolver-vzgwx" podUID="73f3e50e-3229-46e8-969b-e023922fdbce" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.195149 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"ebadaf163ac855b0362b8af6808f9c7248aa514d27d3d51375f398398e6073ac"} Nov 22 07:11:49 crc kubenswrapper[4929]: E1122 07:11:49.195883 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-multus/multus-bb6rk" podUID="763619b4-b584-4089-bd56-96823e22e25e" Nov 22 07:11:49 crc kubenswrapper[4929]: E1122 07:11:49.196068 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:machine-config-daemon,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a,Command:[/usr/bin/machine-config-daemon],Args:[start --payload-version=4.18.1],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:health,HostPort:8798,ContainerPort:8798,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{52428800 0} {} 50Mi 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:rootfs,ReadOnly:false,MountPath:/rootfs,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-97j5q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/health,Port:{0 8798 },Host:127.0.0.1,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:120,TimeoutSeconds:1,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Nov 22 07:11:49 crc kubenswrapper[4929]: E1122 07:11:49.198694 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09,Command:[],Args:[--secure-listen-address=0.0.0.0:9001 --config-file=/etc/kube-rbac-proxy/config-file.yaml --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 --tls-min-version=VersionTLS12 --upstream=http://127.0.0.1:8797 --logtostderr=true --tls-cert-file=/etc/tls/private/tls.crt --tls-private-key-file=/etc/tls/private/tls.key],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:9001,ContainerPort:9001,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{52428800 0} {} 50Mi 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:proxy-tls,ReadOnly:false,MountPath:/etc/tls/private,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:mcd-auth-proxy-config,ReadOnly:false,MountPath:/etc/kube-rbac-proxy,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-97j5q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Nov 22 07:11:49 crc kubenswrapper[4929]: E1122 07:11:49.199953 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"machine-config-daemon\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"]" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.204847 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.214109 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.214142 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.214152 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.214167 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.214178 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:49Z","lastTransitionTime":"2025-11-22T07:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.214564 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.230366 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node 
kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\
\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\
",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.245258 4929 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":
\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"cont
ainerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.256537 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.265071 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.272817 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.277929 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.287445 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}}
,\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.295487 4929 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe0
1409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.303372 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.309519 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.316662 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.316699 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.316709 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.316724 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.316737 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:49Z","lastTransitionTime":"2025-11-22T07:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.318587 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.331897 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.341107 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.349599 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.359533 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.368419 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.377712 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.391448 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc 
kubenswrapper[4929]: I1122 07:11:49.402545 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"runnin
g\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.411927 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.419655 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.419699 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.419710 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.419729 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.419743 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:49Z","lastTransitionTime":"2025-11-22T07:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.421925 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.429588 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.441523 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.458934 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.493720 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.522602 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.522643 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.522655 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.522670 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.522680 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:49Z","lastTransitionTime":"2025-11-22T07:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.532262 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.625544 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.625594 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.625611 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.625627 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.625638 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:49Z","lastTransitionTime":"2025-11-22T07:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.727226 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.727262 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.727271 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.727287 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.727297 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:49Z","lastTransitionTime":"2025-11-22T07:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.829637 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.829695 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.829716 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.829738 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.829755 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:49Z","lastTransitionTime":"2025-11-22T07:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.932731 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.932776 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.932785 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.932801 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:49 crc kubenswrapper[4929]: I1122 07:11:49.932814 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:49Z","lastTransitionTime":"2025-11-22T07:11:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.035725 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.035791 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.035806 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.035828 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.035844 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:50Z","lastTransitionTime":"2025-11-22T07:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.138394 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.138433 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.138443 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.138462 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.138475 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:50Z","lastTransitionTime":"2025-11-22T07:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.240506 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.240550 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.240560 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.240577 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.240587 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:50Z","lastTransitionTime":"2025-11-22T07:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.343537 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.343580 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.343590 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.343607 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.343617 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:50Z","lastTransitionTime":"2025-11-22T07:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.446252 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.446331 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.446351 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.446370 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.446384 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:50Z","lastTransitionTime":"2025-11-22T07:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.533524 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.533640 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.533671 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:11:50 crc kubenswrapper[4929]: E1122 07:11:50.533745 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:11:58.533730327 +0000 UTC m=+55.643184340 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:11:50 crc kubenswrapper[4929]: E1122 07:11:50.533797 4929 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 22 07:11:50 crc kubenswrapper[4929]: E1122 07:11:50.533888 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 07:11:58.53386777 +0000 UTC m=+55.643321793 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 22 07:11:50 crc kubenswrapper[4929]: E1122 07:11:50.533973 4929 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 22 07:11:50 crc kubenswrapper[4929]: E1122 07:11:50.534114 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 07:11:58.534085736 +0000 UTC m=+55.643539789 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.549027 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.549069 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.549079 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.549093 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.549103 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:50Z","lastTransitionTime":"2025-11-22T07:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.634873 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.634988 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:11:50 crc kubenswrapper[4929]: E1122 07:11:50.635045 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 22 07:11:50 crc kubenswrapper[4929]: E1122 07:11:50.635113 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 22 07:11:50 crc kubenswrapper[4929]: E1122 07:11:50.635133 4929 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 07:11:50 crc kubenswrapper[4929]: E1122 07:11:50.635246 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 07:11:58.635191606 +0000 UTC m=+55.744645629 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 07:11:50 crc kubenswrapper[4929]: E1122 07:11:50.635287 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 22 07:11:50 crc kubenswrapper[4929]: E1122 07:11:50.635332 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 22 07:11:50 crc kubenswrapper[4929]: E1122 07:11:50.635348 4929 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 07:11:50 crc kubenswrapper[4929]: E1122 07:11:50.635455 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 07:11:58.635431152 +0000 UTC m=+55.744885175 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.651664 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.651742 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.651756 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.651775 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.651786 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:50Z","lastTransitionTime":"2025-11-22T07:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.754485 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.754536 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.754547 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.754571 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.754583 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:50Z","lastTransitionTime":"2025-11-22T07:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.800416 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-pvjvh"]
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.801101 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-pvjvh"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.804149 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.804365 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.804400 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.804846 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.817099 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.830809 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.844443 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.856929 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.857017 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.857036 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.857055 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.857073 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:50Z","lastTransitionTime":"2025-11-22T07:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.858250 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.871241 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.879418 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.890535 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.902455 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.918117 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.936798 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.938043 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416-host\") pod \"node-ca-pvjvh\" (UID: \"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\") " pod="openshift-image-registry/node-ca-pvjvh" Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.938084 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dn9pl\" (UniqueName: \"kubernetes.io/projected/f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416-kube-api-access-dn9pl\") pod \"node-ca-pvjvh\" (UID: \"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\") " pod="openshift-image-registry/node-ca-pvjvh" Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.938140 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416-serviceca\") pod \"node-ca-pvjvh\" (UID: \"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\") " pod="openshift-image-registry/node-ca-pvjvh" Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.946441 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.946485 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:11:50 crc kubenswrapper[4929]: E1122 07:11:50.946565 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.946444 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:11:50 crc kubenswrapper[4929]: E1122 07:11:50.946688 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:11:50 crc kubenswrapper[4929]: E1122 07:11:50.946875 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.957662 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.959193 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.959289 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.959303 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.959322 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.959337 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:50Z","lastTransitionTime":"2025-11-22T07:11:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.972900 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:50 crc kubenswrapper[4929]: I1122 07:11:50.989845 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.007132 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.017480 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.039201 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416-serviceca\") pod \"node-ca-pvjvh\" (UID: \"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\") " pod="openshift-image-registry/node-ca-pvjvh" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.039329 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416-host\") pod \"node-ca-pvjvh\" (UID: \"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\") " pod="openshift-image-registry/node-ca-pvjvh" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.039350 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dn9pl\" (UniqueName: \"kubernetes.io/projected/f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416-kube-api-access-dn9pl\") pod \"node-ca-pvjvh\" (UID: \"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\") " pod="openshift-image-registry/node-ca-pvjvh" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.039421 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416-host\") pod \"node-ca-pvjvh\" (UID: \"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\") " pod="openshift-image-registry/node-ca-pvjvh" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.040197 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416-serviceca\") pod \"node-ca-pvjvh\" (UID: \"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\") " pod="openshift-image-registry/node-ca-pvjvh" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.059935 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dn9pl\" (UniqueName: \"kubernetes.io/projected/f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416-kube-api-access-dn9pl\") pod \"node-ca-pvjvh\" (UID: \"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\") 
" pod="openshift-image-registry/node-ca-pvjvh" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.062444 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.062501 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.062528 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.062557 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.062579 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:51Z","lastTransitionTime":"2025-11-22T07:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.113124 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-pvjvh" Nov 22 07:11:51 crc kubenswrapper[4929]: W1122 07:11:51.126310 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf24bc88f_796b_4ca1_a2a0_fb7cc3f3c416.slice/crio-6942f62a8f9e92ae6ecdc29cd67883bb876b5b32560fe7b8c5fe6eff0bd7975d WatchSource:0}: Error finding container 6942f62a8f9e92ae6ecdc29cd67883bb876b5b32560fe7b8c5fe6eff0bd7975d: Status 404 returned error can't find the container with id 6942f62a8f9e92ae6ecdc29cd67883bb876b5b32560fe7b8c5fe6eff0bd7975d Nov 22 07:11:51 crc kubenswrapper[4929]: E1122 07:11:51.128511 4929 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 22 07:11:51 crc kubenswrapper[4929]: container &Container{Name:node-ca,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f,Command:[/bin/sh -c trap 'jobs -p | xargs -r kill; echo shutting down node-ca; exit 0' TERM Nov 22 07:11:51 crc kubenswrapper[4929]: while [ true ]; Nov 22 07:11:51 crc kubenswrapper[4929]: do Nov 22 07:11:51 crc kubenswrapper[4929]: for f in $(ls /tmp/serviceca); do Nov 22 07:11:51 crc kubenswrapper[4929]: echo $f Nov 22 07:11:51 crc kubenswrapper[4929]: ca_file_path="/tmp/serviceca/${f}" Nov 22 07:11:51 crc kubenswrapper[4929]: f=$(echo $f | sed -r 's/(.*)\.\./\1:/') Nov 22 07:11:51 crc kubenswrapper[4929]: reg_dir_path="/etc/docker/certs.d/${f}" Nov 22 07:11:51 crc kubenswrapper[4929]: if [ -e "${reg_dir_path}" ]; then Nov 22 07:11:51 crc kubenswrapper[4929]: cp -u $ca_file_path $reg_dir_path/ca.crt Nov 22 07:11:51 crc kubenswrapper[4929]: else Nov 22 07:11:51 crc kubenswrapper[4929]: mkdir $reg_dir_path Nov 22 07:11:51 crc kubenswrapper[4929]: cp $ca_file_path $reg_dir_path/ca.crt Nov 22 07:11:51 crc kubenswrapper[4929]: fi Nov 22 07:11:51 crc kubenswrapper[4929]: done Nov 22 07:11:51 crc kubenswrapper[4929]: for d in $(ls /etc/docker/certs.d); do Nov 22 07:11:51 crc kubenswrapper[4929]: echo $d Nov 22 07:11:51 crc kubenswrapper[4929]: dp=$(echo $d | sed -r 's/(.*):/\1\.\./') Nov 22 07:11:51 crc kubenswrapper[4929]: reg_conf_path="/tmp/serviceca/${dp}" 
Nov 22 07:11:51 crc kubenswrapper[4929]: if [ ! -e "${reg_conf_path}" ]; then
Nov 22 07:11:51 crc kubenswrapper[4929]: rm -rf /etc/docker/certs.d/$d
Nov 22 07:11:51 crc kubenswrapper[4929]: fi
Nov 22 07:11:51 crc kubenswrapper[4929]: done
Nov 22 07:11:51 crc kubenswrapper[4929]: sleep 60 & wait ${!}
Nov 22 07:11:51 crc kubenswrapper[4929]: done
Nov 22 07:11:51 crc kubenswrapper[4929]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{10485760 0} {} 10Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:serviceca,ReadOnly:false,MountPath:/tmp/serviceca,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host,ReadOnly:false,MountPath:/etc/docker/certs.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dn9pl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:*1001,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*0,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod node-ca-pvjvh_openshift-image-registry(f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars
Nov 22 07:11:51 crc kubenswrapper[4929]: > logger="UnhandledError"
Nov 22 07:11:51 crc kubenswrapper[4929]: E1122 07:11:51.129941 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"node-ca\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-image-registry/node-ca-pvjvh" podUID="f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416"
Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.164640 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.164692 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.164706 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.164726 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.164738 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:51Z","lastTransitionTime":"2025-11-22T07:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.201383 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-pvjvh" event={"ID":"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416","Type":"ContainerStarted","Data":"6942f62a8f9e92ae6ecdc29cd67883bb876b5b32560fe7b8c5fe6eff0bd7975d"}
Nov 22 07:11:51 crc kubenswrapper[4929]: E1122 07:11:51.202893 4929 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Nov 22 07:11:51 crc kubenswrapper[4929]: container &Container{Name:node-ca,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f,Command:[/bin/sh -c trap 'jobs -p | xargs -r kill; echo shutting down node-ca; exit 0' TERM
Nov 22 07:11:51 crc kubenswrapper[4929]: while [ true ];
Nov 22 07:11:51 crc kubenswrapper[4929]: do
Nov 22 07:11:51 crc kubenswrapper[4929]: for f in $(ls /tmp/serviceca); do
Nov 22 07:11:51 crc kubenswrapper[4929]: echo $f
Nov 22 07:11:51 crc kubenswrapper[4929]: ca_file_path="/tmp/serviceca/${f}"
Nov 22 07:11:51 crc kubenswrapper[4929]: f=$(echo $f | sed -r 's/(.*)\.\./\1:/')
Nov 22 07:11:51 crc kubenswrapper[4929]: reg_dir_path="/etc/docker/certs.d/${f}"
Nov 22 07:11:51 crc kubenswrapper[4929]: if [ -e "${reg_dir_path}" ]; then
Nov 22 07:11:51 crc kubenswrapper[4929]: cp -u $ca_file_path $reg_dir_path/ca.crt
Nov 22 07:11:51 crc kubenswrapper[4929]: else
Nov 22 07:11:51 crc kubenswrapper[4929]: mkdir $reg_dir_path
Nov 22 07:11:51 crc kubenswrapper[4929]: cp $ca_file_path $reg_dir_path/ca.crt
Nov 22 07:11:51 crc kubenswrapper[4929]: fi
Nov 22 07:11:51 crc kubenswrapper[4929]: done
Nov 22 07:11:51 crc kubenswrapper[4929]: for d in $(ls /etc/docker/certs.d); do
Nov 22 07:11:51 crc kubenswrapper[4929]: echo $d
Nov 22 07:11:51 crc kubenswrapper[4929]: dp=$(echo $d | sed -r 's/(.*):/\1\.\./')
Nov 22 07:11:51 crc kubenswrapper[4929]: reg_conf_path="/tmp/serviceca/${dp}"
Nov 22 07:11:51 crc kubenswrapper[4929]: if [ ! -e "${reg_conf_path}" ]; then
Nov 22 07:11:51 crc kubenswrapper[4929]: rm -rf /etc/docker/certs.d/$d
Nov 22 07:11:51 crc kubenswrapper[4929]: fi
Nov 22 07:11:51 crc kubenswrapper[4929]: done
Nov 22 07:11:51 crc kubenswrapper[4929]: sleep 60 & wait ${!}
Nov 22 07:11:51 crc kubenswrapper[4929]: done
Nov 22 07:11:51 crc kubenswrapper[4929]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{10485760 0} {} 10Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:serviceca,ReadOnly:false,MountPath:/tmp/serviceca,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host,ReadOnly:false,MountPath:/etc/docker/certs.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dn9pl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:*1001,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*0,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod node-ca-pvjvh_openshift-image-registry(f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars
Nov 22 07:11:51 crc kubenswrapper[4929]: > logger="UnhandledError"
Nov 22 07:11:51 crc kubenswrapper[4929]: E1122 07:11:51.204124 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"node-ca\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-image-registry/node-ca-pvjvh" podUID="f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416"
Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.211486 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.226190 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.238727 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.257614 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:51 crc 
kubenswrapper[4929]: I1122 07:11:51.267492 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.267529 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.267537 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.267550 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.267559 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:51Z","lastTransitionTime":"2025-11-22T07:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.267906 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.276116 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.283983 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.290802 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.302839 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.317661 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.327330 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.336035 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.363371 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cer
t-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.370313 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.370362 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.370371 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.370383 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.370393 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:51Z","lastTransitionTime":"2025-11-22T07:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.381993 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.393719 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.445109 4929 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.472662 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.472717 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.472732 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.472751 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.472766 4929 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:51Z","lastTransitionTime":"2025-11-22T07:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.575176 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.575565 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.575673 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.575778 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.575863 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:51Z","lastTransitionTime":"2025-11-22T07:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.678626 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.678693 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.678711 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.678736 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.678754 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:51Z","lastTransitionTime":"2025-11-22T07:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.782387 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.782468 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.782491 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.782566 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.782608 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:51Z","lastTransitionTime":"2025-11-22T07:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.885868 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.885949 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.885970 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.886187 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.886228 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:51Z","lastTransitionTime":"2025-11-22T07:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.989144 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.989590 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.989769 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.989906 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:51 crc kubenswrapper[4929]: I1122 07:11:51.990004 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:51Z","lastTransitionTime":"2025-11-22T07:11:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.093050 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.093092 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.093105 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.093122 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.093135 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:52Z","lastTransitionTime":"2025-11-22T07:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.196577 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.196636 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.196644 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.196659 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.196668 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:52Z","lastTransitionTime":"2025-11-22T07:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.299532 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.299578 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.299589 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.299605 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.299616 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:52Z","lastTransitionTime":"2025-11-22T07:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.402480 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.402531 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.402547 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.402568 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.402583 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:52Z","lastTransitionTime":"2025-11-22T07:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.504476 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.504523 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.504536 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.504555 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.504568 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:52Z","lastTransitionTime":"2025-11-22T07:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.607917 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.607997 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.608028 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.608060 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.608082 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:52Z","lastTransitionTime":"2025-11-22T07:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.701251 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.701319 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.701343 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.701373 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.701393 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:52Z","lastTransitionTime":"2025-11-22T07:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:52 crc kubenswrapper[4929]: E1122 07:11:52.717427 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.721399 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.721430 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.721439 4929 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.721453 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.721464 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:52Z","lastTransitionTime":"2025-11-22T07:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:52 crc kubenswrapper[4929]: E1122 07:11:52.732727 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.736783 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.736831 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.736846 4929 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.736859 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.736869 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:52Z","lastTransitionTime":"2025-11-22T07:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:52 crc kubenswrapper[4929]: E1122 07:11:52.749068 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.754852 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.754900 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.754913 4929 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.754929 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.754941 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:52Z","lastTransitionTime":"2025-11-22T07:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:52 crc kubenswrapper[4929]: E1122 07:11:52.764277 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.767619 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.767651 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.767659 4929 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.767674 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.767682 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:52Z","lastTransitionTime":"2025-11-22T07:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:52 crc kubenswrapper[4929]: E1122 07:11:52.775280 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:52 crc kubenswrapper[4929]: E1122 07:11:52.775406 4929 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.776631 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.776652 4929 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.776660 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.776673 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.776688 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:52Z","lastTransitionTime":"2025-11-22T07:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.879898 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.879961 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.879974 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.879993 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.880006 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:52Z","lastTransitionTime":"2025-11-22T07:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.946969 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:11:52 crc kubenswrapper[4929]: E1122 07:11:52.947105 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.946991 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:11:52 crc kubenswrapper[4929]: E1122 07:11:52.947372 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.947102 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:11:52 crc kubenswrapper[4929]: E1122 07:11:52.947604 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.982909 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.982953 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.982969 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.982989 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:52 crc kubenswrapper[4929]: I1122 07:11:52.983003 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:52Z","lastTransitionTime":"2025-11-22T07:11:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.085243 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.085291 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.085307 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.085327 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.085342 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:53Z","lastTransitionTime":"2025-11-22T07:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.187981 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.188027 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.188038 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.188055 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.188067 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:53Z","lastTransitionTime":"2025-11-22T07:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.291343 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.291440 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.291474 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.291503 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.291522 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:53Z","lastTransitionTime":"2025-11-22T07:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.395837 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.395891 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.395903 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.395924 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.395936 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:53Z","lastTransitionTime":"2025-11-22T07:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.499747 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.499798 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.499808 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.499820 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.499828 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:53Z","lastTransitionTime":"2025-11-22T07:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.602743 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.602789 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.602807 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.602831 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.602849 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:53Z","lastTransitionTime":"2025-11-22T07:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.706394 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.706467 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.706483 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.706508 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.706530 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:53Z","lastTransitionTime":"2025-11-22T07:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.809317 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.809371 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.809396 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.809426 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.809447 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:53Z","lastTransitionTime":"2025-11-22T07:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.869503 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.870473 4929 scope.go:117] "RemoveContainer" containerID="d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.912420 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.912511 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.912530 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.912557 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.912577 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:53Z","lastTransitionTime":"2025-11-22T07:11:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.964249 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.981365 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:53 crc kubenswrapper[4929]: I1122 07:11:53.993899 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.004298 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.015706 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.015738 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.015747 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.015760 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.015769 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:54Z","lastTransitionTime":"2025-11-22T07:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.017513 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.027246 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.041807 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:54 crc 
kubenswrapper[4929]: I1122 07:11:54.054729 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"runnin
g\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.065366 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.078380 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.087905 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.102044 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.118477 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.118536 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.118554 4929 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.118578 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.118595 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:54Z","lastTransitionTime":"2025-11-22T07:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.126374 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"nam
e\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.140110 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.151530 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.220499 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.220577 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.220599 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.220627 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.220651 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:54Z","lastTransitionTime":"2025-11-22T07:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.324149 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.324202 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.324232 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.324253 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.324269 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:54Z","lastTransitionTime":"2025-11-22T07:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.426975 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.427199 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.427227 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.427243 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.427255 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:54Z","lastTransitionTime":"2025-11-22T07:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.530463 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.530506 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.530517 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.530535 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.530547 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:54Z","lastTransitionTime":"2025-11-22T07:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.633587 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.633666 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.633686 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.633710 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.633728 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:54Z","lastTransitionTime":"2025-11-22T07:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.736765 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.736836 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.736855 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.736886 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.736905 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:54Z","lastTransitionTime":"2025-11-22T07:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.839945 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.840017 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.840035 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.840061 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.840079 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:54Z","lastTransitionTime":"2025-11-22T07:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.943080 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.943144 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.943163 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.943188 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.943242 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:54Z","lastTransitionTime":"2025-11-22T07:11:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.946343 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.946425 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:11:54 crc kubenswrapper[4929]: E1122 07:11:54.946497 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:11:54 crc kubenswrapper[4929]: I1122 07:11:54.946537 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:11:54 crc kubenswrapper[4929]: E1122 07:11:54.946704 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:11:54 crc kubenswrapper[4929]: E1122 07:11:54.946820 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.046005 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.046054 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.046071 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.046094 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.046112 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:55Z","lastTransitionTime":"2025-11-22T07:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.148596 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.148631 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.148641 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.148655 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.148663 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:55Z","lastTransitionTime":"2025-11-22T07:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.213385 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.215984 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc"} Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.216645 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.234392 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.247589 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.250615 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.250651 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.250664 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.250680 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.250694 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:55Z","lastTransitionTime":"2025-11-22T07:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.270395 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"image
ID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"m
ountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126
.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.293102 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.312620 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.314523 4929 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.322549 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.332337 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.344301 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.353365 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.353434 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.353470 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.353486 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.353510 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.353530 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:55Z","lastTransitionTime":"2025-11-22T07:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.362895 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.373506 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.382916 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.394427 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.404598 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.415160 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.455725 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.455759 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.455768 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.455783 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.455793 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:55Z","lastTransitionTime":"2025-11-22T07:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.557534 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.557589 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.557609 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.557633 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.557650 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:55Z","lastTransitionTime":"2025-11-22T07:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.660723 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.660795 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.660812 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.660838 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.660861 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:55Z","lastTransitionTime":"2025-11-22T07:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.764103 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.764163 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.764181 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.764204 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.764246 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:55Z","lastTransitionTime":"2025-11-22T07:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.867534 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.867577 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.867586 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.867600 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.867609 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:55Z","lastTransitionTime":"2025-11-22T07:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.970053 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.970347 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.970357 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.970369 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:55 crc kubenswrapper[4929]: I1122 07:11:55.970380 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:55Z","lastTransitionTime":"2025-11-22T07:11:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.073563 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.073645 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.073674 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.073704 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.073726 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:56Z","lastTransitionTime":"2025-11-22T07:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.176906 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.176965 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.176984 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.177008 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.177027 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:56Z","lastTransitionTime":"2025-11-22T07:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.280254 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.280320 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.280335 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.280366 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.280416 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:56Z","lastTransitionTime":"2025-11-22T07:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.383127 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.383172 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.383187 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.383231 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.383250 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:56Z","lastTransitionTime":"2025-11-22T07:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.486851 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.486911 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.486929 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.486954 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.486971 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:56Z","lastTransitionTime":"2025-11-22T07:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.590308 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.590394 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.590419 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.590452 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.590476 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:56Z","lastTransitionTime":"2025-11-22T07:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.693251 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.693312 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.693322 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.693337 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.693347 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:56Z","lastTransitionTime":"2025-11-22T07:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.796688 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.796759 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.796783 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.796813 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.796836 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:56Z","lastTransitionTime":"2025-11-22T07:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.904687 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.904752 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.904771 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.904795 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.904813 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:56Z","lastTransitionTime":"2025-11-22T07:11:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.946406 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.946502 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:11:56 crc kubenswrapper[4929]: E1122 07:11:56.946760 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:11:56 crc kubenswrapper[4929]: I1122 07:11:56.946811 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:11:56 crc kubenswrapper[4929]: E1122 07:11:56.947124 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:11:56 crc kubenswrapper[4929]: E1122 07:11:56.947440 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.008017 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.008083 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.008096 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.008112 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.008126 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:57Z","lastTransitionTime":"2025-11-22T07:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.110524 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.110571 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.110586 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.110607 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.110623 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:57Z","lastTransitionTime":"2025-11-22T07:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.212619 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.212655 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.212664 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.212678 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.212687 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:57Z","lastTransitionTime":"2025-11-22T07:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.315602 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.315645 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.315657 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.315674 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.315724 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:57Z","lastTransitionTime":"2025-11-22T07:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.419129 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.419183 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.419202 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.419260 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.419278 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:57Z","lastTransitionTime":"2025-11-22T07:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.522703 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.522762 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.522782 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.522818 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.522856 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:57Z","lastTransitionTime":"2025-11-22T07:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.625458 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.625516 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.625535 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.625559 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.625578 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:57Z","lastTransitionTime":"2025-11-22T07:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.728869 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.728907 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.728916 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.728937 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.728955 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:57Z","lastTransitionTime":"2025-11-22T07:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.831464 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.831498 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.831508 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.831522 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.831532 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:57Z","lastTransitionTime":"2025-11-22T07:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.934395 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.934478 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.934498 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.934524 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:57 crc kubenswrapper[4929]: I1122 07:11:57.934541 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:57Z","lastTransitionTime":"2025-11-22T07:11:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.037022 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.037077 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.037093 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.037113 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.037127 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:58Z","lastTransitionTime":"2025-11-22T07:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.140723 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.140803 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.140828 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.140860 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.140883 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:58Z","lastTransitionTime":"2025-11-22T07:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.226058 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42"} Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.228262 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635"} Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.228318 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447"} Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.239124 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.243581 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.243608 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.243617 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.243630 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.243640 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:58Z","lastTransitionTime":"2025-11-22T07:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.250721 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.260534 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.271890 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.278725 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.291584 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.306282 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.330652 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overr
ides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"n
ame\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.346803 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.346848 4929 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.346859 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.346876 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.346886 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:58Z","lastTransitionTime":"2025-11-22T07:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.347654 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.356654 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready 
status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.365607 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.372951 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.386989 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.408270 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.416604 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.425883 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.434021 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.440421 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.449471 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.449518 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.449529 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.449547 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.449560 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:58Z","lastTransitionTime":"2025-11-22T07:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.452433 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117e
e1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.461819 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.479131 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.487794 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.498150 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.516408 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.524757 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.532479 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.540783 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.551743 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.551779 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.551788 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.551802 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.551812 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:58Z","lastTransitionTime":"2025-11-22T07:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.553327 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.561638 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.570684 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.622346 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.622462 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.622503 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:11:58 crc kubenswrapper[4929]: E1122 07:11:58.622545 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:12:14.62251902 +0000 UTC m=+71.731973043 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:11:58 crc kubenswrapper[4929]: E1122 07:11:58.622616 4929 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 07:11:58 crc kubenswrapper[4929]: E1122 07:11:58.622637 4929 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 07:11:58 crc kubenswrapper[4929]: E1122 07:11:58.622681 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 07:12:14.622664834 +0000 UTC m=+71.732118897 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 07:11:58 crc kubenswrapper[4929]: E1122 07:11:58.622729 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 07:12:14.622705715 +0000 UTC m=+71.732159738 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.655128 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.655175 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.655190 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.655283 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.655309 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:58Z","lastTransitionTime":"2025-11-22T07:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.723591 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.723703 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:11:58 crc kubenswrapper[4929]: E1122 07:11:58.723820 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 07:11:58 crc kubenswrapper[4929]: E1122 07:11:58.723883 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 07:11:58 crc kubenswrapper[4929]: E1122 07:11:58.723909 4929 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 07:11:58 crc kubenswrapper[4929]: E1122 07:11:58.723930 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 07:11:58 crc kubenswrapper[4929]: E1122 07:11:58.723958 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 07:11:58 crc kubenswrapper[4929]: E1122 07:11:58.723978 4929 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 07:11:58 crc kubenswrapper[4929]: E1122 07:11:58.724021 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 07:12:14.72398891 +0000 UTC m=+71.833442963 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 07:11:58 crc kubenswrapper[4929]: E1122 07:11:58.724072 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 07:12:14.724047171 +0000 UTC m=+71.833501334 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.758272 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.758335 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.758353 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.758389 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.758410 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:58Z","lastTransitionTime":"2025-11-22T07:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.860406 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.860447 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.860458 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.860475 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.860488 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:58Z","lastTransitionTime":"2025-11-22T07:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.946613 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.946663 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:11:58 crc kubenswrapper[4929]: E1122 07:11:58.946749 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.946623 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:11:58 crc kubenswrapper[4929]: E1122 07:11:58.946884 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:11:58 crc kubenswrapper[4929]: E1122 07:11:58.946979 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.962702 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.962751 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.962769 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.962794 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:58 crc kubenswrapper[4929]: I1122 07:11:58.962812 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:58Z","lastTransitionTime":"2025-11-22T07:11:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.066496 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.066576 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.066600 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.066629 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.066652 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:59Z","lastTransitionTime":"2025-11-22T07:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.169492 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.169553 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.169570 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.169598 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.169616 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:59Z","lastTransitionTime":"2025-11-22T07:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.233606 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9"} Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.271839 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.271916 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.271940 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.271969 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.271991 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:59Z","lastTransitionTime":"2025-11-22T07:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.374148 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.374184 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.374195 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.374238 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.374251 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:59Z","lastTransitionTime":"2025-11-22T07:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.477974 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.478050 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.478073 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.478105 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.478126 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:59Z","lastTransitionTime":"2025-11-22T07:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.580553 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.580611 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.580626 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.580648 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.580663 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:59Z","lastTransitionTime":"2025-11-22T07:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.684733 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.684807 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.684825 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.684851 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.684868 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:59Z","lastTransitionTime":"2025-11-22T07:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.790395 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.790535 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.790611 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.790683 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.790772 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:59Z","lastTransitionTime":"2025-11-22T07:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.894613 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.894711 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.894729 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.894754 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.894772 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:59Z","lastTransitionTime":"2025-11-22T07:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.997334 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.997385 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.997396 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.997415 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:11:59 crc kubenswrapper[4929]: I1122 07:11:59.997427 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:11:59Z","lastTransitionTime":"2025-11-22T07:11:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.099472 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.099516 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.099525 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.099539 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.099549 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:00Z","lastTransitionTime":"2025-11-22T07:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.202317 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.202351 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.202362 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.202377 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.202387 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:00Z","lastTransitionTime":"2025-11-22T07:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.252840 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.268050 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.280563 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.299539 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.304856 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.304891 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.304901 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.304916 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.304928 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:00Z","lastTransitionTime":"2025-11-22T07:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.314488 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.327464 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.345186 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.358298 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.381701 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.412923 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.412964 4929 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.412974 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.412990 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.413001 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:00Z","lastTransitionTime":"2025-11-22T07:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.439694 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\
\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.468659 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.481905 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.493740 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.501703 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.514541 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.514606 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.514619 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.514649 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.514666 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:00Z","lastTransitionTime":"2025-11-22T07:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.519159 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.617885 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.617945 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.617962 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.617987 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.618005 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:00Z","lastTransitionTime":"2025-11-22T07:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.636765 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5"] Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.637152 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.639117 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.639486 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.656856 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07
f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.668236 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.689592 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.717778 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.720615 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.720657 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.720667 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.720681 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.720691 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:00Z","lastTransitionTime":"2025-11-22T07:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.737070 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.749769 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/779ec211-1252-46f1-80bc-2373dc58be4a-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-g9vj5\" (UID: \"779ec211-1252-46f1-80bc-2373dc58be4a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.749830 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/779ec211-1252-46f1-80bc-2373dc58be4a-env-overrides\") pod \"ovnkube-control-plane-749d76644c-g9vj5\" (UID: \"779ec211-1252-46f1-80bc-2373dc58be4a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.749906 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/779ec211-1252-46f1-80bc-2373dc58be4a-ovn-control-plane-metrics-cert\") pod 
\"ovnkube-control-plane-749d76644c-g9vj5\" (UID: \"779ec211-1252-46f1-80bc-2373dc58be4a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.750024 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xn9b\" (UniqueName: \"kubernetes.io/projected/779ec211-1252-46f1-80bc-2373dc58be4a-kube-api-access-2xn9b\") pod \"ovnkube-control-plane-749d76644c-g9vj5\" (UID: \"779ec211-1252-46f1-80bc-2373dc58be4a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.760557 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.775509 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.783796 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.798029 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.811629 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.823513 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.823579 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.823594 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.823612 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.823624 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:00Z","lastTransitionTime":"2025-11-22T07:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.825192 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.835437 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers 
with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.847294 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.850681 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/779ec211-1252-46f1-80bc-2373dc58be4a-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-g9vj5\" (UID: \"779ec211-1252-46f1-80bc-2373dc58be4a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.850756 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xn9b\" (UniqueName: \"kubernetes.io/projected/779ec211-1252-46f1-80bc-2373dc58be4a-kube-api-access-2xn9b\") pod \"ovnkube-control-plane-749d76644c-g9vj5\" (UID: \"779ec211-1252-46f1-80bc-2373dc58be4a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.850783 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/779ec211-1252-46f1-80bc-2373dc58be4a-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-g9vj5\" (UID: \"779ec211-1252-46f1-80bc-2373dc58be4a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.850805 4929 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/779ec211-1252-46f1-80bc-2373dc58be4a-env-overrides\") pod \"ovnkube-control-plane-749d76644c-g9vj5\" (UID: \"779ec211-1252-46f1-80bc-2373dc58be4a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.851416 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/779ec211-1252-46f1-80bc-2373dc58be4a-env-overrides\") pod \"ovnkube-control-plane-749d76644c-g9vj5\" (UID: \"779ec211-1252-46f1-80bc-2373dc58be4a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.852057 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/779ec211-1252-46f1-80bc-2373dc58be4a-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-g9vj5\" (UID: \"779ec211-1252-46f1-80bc-2373dc58be4a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.854890 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/779ec211-1252-46f1-80bc-2373dc58be4a-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-g9vj5\" (UID: \"779ec211-1252-46f1-80bc-2373dc58be4a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.864062 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.867931 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xn9b\" (UniqueName: \"kubernetes.io/projected/779ec211-1252-46f1-80bc-2373dc58be4a-kube-api-access-2xn9b\") pod \"ovnkube-control-plane-749d76644c-g9vj5\" (UID: \"779ec211-1252-46f1-80bc-2373dc58be4a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 
07:12:00.879318 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.897291 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:00Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.926283 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:00 crc 
kubenswrapper[4929]: I1122 07:12:00.926327 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.926336 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.926351 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.926360 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:00Z","lastTransitionTime":"2025-11-22T07:12:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.946621 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.946724 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.946618 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:00 crc kubenswrapper[4929]: E1122 07:12:00.946765 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:00 crc kubenswrapper[4929]: E1122 07:12:00.946894 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:00 crc kubenswrapper[4929]: E1122 07:12:00.946981 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:12:00 crc kubenswrapper[4929]: I1122 07:12:00.952020 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" Nov 22 07:12:00 crc kubenswrapper[4929]: W1122 07:12:00.996460 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod779ec211_1252_46f1_80bc_2373dc58be4a.slice/crio-d1a8bd76bb08df24b8adb47f4e6665b5913392b4e71704b2593f32e8e94a650d WatchSource:0}: Error finding container d1a8bd76bb08df24b8adb47f4e6665b5913392b4e71704b2593f32e8e94a650d: Status 404 returned error can't find the container with id d1a8bd76bb08df24b8adb47f4e6665b5913392b4e71704b2593f32e8e94a650d Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.029488 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.029535 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.029547 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.029563 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.029577 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:01Z","lastTransitionTime":"2025-11-22T07:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.132066 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.132361 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.132372 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.132387 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.132397 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:01Z","lastTransitionTime":"2025-11-22T07:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.234736 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.234773 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.234782 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.234796 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.234807 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:01Z","lastTransitionTime":"2025-11-22T07:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.240450 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-vzgwx" event={"ID":"73f3e50e-3229-46e8-969b-e023922fdbce","Type":"ContainerStarted","Data":"7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7"} Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.242646 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" event={"ID":"779ec211-1252-46f1-80bc-2373dc58be4a","Type":"ContainerStarted","Data":"d1a8bd76bb08df24b8adb47f4e6665b5913392b4e71704b2593f32e8e94a650d"} Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.244744 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" event={"ID":"78d8d828-48aa-4499-a4af-54e0dd754349","Type":"ContainerStarted","Data":"212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44"} Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.258353 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.275151 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.293807 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.314103 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.337370 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.337404 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.337412 4929 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.337426 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.337435 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:01Z","lastTransitionTime":"2025-11-22T07:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.344868 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.366904 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.379302 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.390562 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.400030 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.412770 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.429998 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.442014 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.442901 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.442918 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.442926 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.442945 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.442954 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:01Z","lastTransitionTime":"2025-11-22T07:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.459959 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.480095 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"r
esource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.492516 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.504826 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.545711 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.545758 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.545771 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.545790 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.545829 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:01Z","lastTransitionTime":"2025-11-22T07:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.648757 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.648801 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.648849 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.648867 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.648878 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:01Z","lastTransitionTime":"2025-11-22T07:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.751975 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.752048 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.752067 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.752090 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.752107 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:01Z","lastTransitionTime":"2025-11-22T07:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.854343 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.854388 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.854399 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.854418 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.854431 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:01Z","lastTransitionTime":"2025-11-22T07:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.908909 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.916864 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.921439 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.939188 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resou
rces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.952083 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.955842 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.955879 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.955890 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.955906 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.955917 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:01Z","lastTransitionTime":"2025-11-22T07:12:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.968026 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.981061 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:01 crc kubenswrapper[4929]: I1122 07:12:01.993244 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.004681 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.015081 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The 
container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.032103 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.048641 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.057963 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.058000 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.058012 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.058029 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.058042 4929 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:02Z","lastTransitionTime":"2025-11-22T07:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.060741 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.075911 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.089821 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.111969 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.116919 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-vmdgb"] Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.117649 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:02 crc kubenswrapper[4929]: E1122 07:12:02.117777 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.132731 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-
11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44
717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.147055 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.157808 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.160106 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.160142 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.160154 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.160171 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.160183 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:02Z","lastTransitionTime":"2025-11-22T07:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.169709 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.182284 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.192413 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.210475 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-binco
py\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.235080 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.248361 4929 generic.go:334] "Generic (PLEG): container finished" podID="78d8d828-48aa-4499-a4af-54e0dd754349" containerID="212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44" exitCode=0 Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.248442 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" event={"ID":"78d8d828-48aa-4499-a4af-54e0dd754349","Type":"ContainerDied","Data":"212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44"} Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.248870 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod 
was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.250288 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" event={"ID":"779ec211-1252-46f1-80bc-2373dc58be4a","Type":"ContainerStarted","Data":"c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386"} Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.250363 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" event={"ID":"779ec211-1252-46f1-80bc-2373dc58be4a","Type":"ContainerStarted","Data":"5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3"} Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.258886 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.262781 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.262809 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.262848 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.262865 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.262877 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:02Z","lastTransitionTime":"2025-11-22T07:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.266582 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2mjg\" (UniqueName: \"kubernetes.io/projected/42cb9248-6b5b-4970-8232-68883ec65710-kube-api-access-r2mjg\") pod \"network-metrics-daemon-vmdgb\" (UID: \"42cb9248-6b5b-4970-8232-68883ec65710\") " pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.266663 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs\") pod \"network-metrics-daemon-vmdgb\" (UID: \"42cb9248-6b5b-4970-8232-68883ec65710\") " pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.272116 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.285740 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.301181 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.318615 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.331384 4929 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.342026 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.352480 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165
\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.364507 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.365362 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.365403 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.365412 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.365426 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.365435 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:02Z","lastTransitionTime":"2025-11-22T07:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.367143 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs\") pod \"network-metrics-daemon-vmdgb\" (UID: \"42cb9248-6b5b-4970-8232-68883ec65710\") " pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.367190 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2mjg\" (UniqueName: \"kubernetes.io/projected/42cb9248-6b5b-4970-8232-68883ec65710-kube-api-access-r2mjg\") pod \"network-metrics-daemon-vmdgb\" (UID: \"42cb9248-6b5b-4970-8232-68883ec65710\") " pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:02 crc kubenswrapper[4929]: E1122 07:12:02.367903 4929 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 07:12:02 crc kubenswrapper[4929]: E1122 07:12:02.367970 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs podName:42cb9248-6b5b-4970-8232-68883ec65710 nodeName:}" failed. No retries permitted until 2025-11-22 07:12:02.867950269 +0000 UTC m=+59.977404292 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs") pod "network-metrics-daemon-vmdgb" (UID: "42cb9248-6b5b-4970-8232-68883ec65710") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.382267 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.387204 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2mjg\" (UniqueName: \"kubernetes.io/projected/42cb9248-6b5b-4970-8232-68883ec65710-kube-api-access-r2mjg\") pod 
\"network-metrics-daemon-vmdgb\" (UID: \"42cb9248-6b5b-4970-8232-68883ec65710\") " pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.393756 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\
\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.405369 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.419930 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.432424 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.444336 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 
07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.460597 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.467897 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.467965 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.467990 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.468021 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.468044 4929 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:02Z","lastTransitionTime":"2025-11-22T07:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.478072 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.491836 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.512898 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.534588 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.548404 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.560058 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.570776 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.570855 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.571123 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.571372 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.571430 4929 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:02Z","lastTransitionTime":"2025-11-22T07:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.576808 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.593679 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.630412 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\
\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"po
dIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.648014 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/open
shift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.662007 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod 
was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.673568 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.673597 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.673606 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.673618 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.673627 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:02Z","lastTransitionTime":"2025-11-22T07:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.674570 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.683697 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.775777 4929 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.775857 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.775908 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.775934 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.775951 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:02Z","lastTransitionTime":"2025-11-22T07:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.871092 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs\") pod \"network-metrics-daemon-vmdgb\" (UID: \"42cb9248-6b5b-4970-8232-68883ec65710\") " pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:02 crc kubenswrapper[4929]: E1122 07:12:02.871375 4929 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 07:12:02 crc kubenswrapper[4929]: E1122 07:12:02.871484 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs podName:42cb9248-6b5b-4970-8232-68883ec65710 nodeName:}" failed. No retries permitted until 2025-11-22 07:12:03.871455608 +0000 UTC m=+60.980909661 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs") pod "network-metrics-daemon-vmdgb" (UID: "42cb9248-6b5b-4970-8232-68883ec65710") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.878780 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.878813 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.878821 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.878834 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.878843 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:02Z","lastTransitionTime":"2025-11-22T07:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.947262 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:02 crc kubenswrapper[4929]: E1122 07:12:02.947413 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.947539 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:02 crc kubenswrapper[4929]: E1122 07:12:02.947750 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.947905 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:02 crc kubenswrapper[4929]: E1122 07:12:02.947994 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.980968 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.980996 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.981004 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.981017 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:02 crc kubenswrapper[4929]: I1122 07:12:02.981027 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:02Z","lastTransitionTime":"2025-11-22T07:12:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.084319 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.084389 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.084413 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.084443 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.084471 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:03Z","lastTransitionTime":"2025-11-22T07:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.161848 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.161895 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.161908 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.161927 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.161942 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:03Z","lastTransitionTime":"2025-11-22T07:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:03 crc kubenswrapper[4929]: E1122 07:12:03.178420 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.183963 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.183999 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.184010 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.184028 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.184041 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:03Z","lastTransitionTime":"2025-11-22T07:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:03 crc kubenswrapper[4929]: E1122 07:12:03.202017 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.206298 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.206332 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.206346 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.206363 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.206378 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:03Z","lastTransitionTime":"2025-11-22T07:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:03 crc kubenswrapper[4929]: E1122 07:12:03.220045 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.224374 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.224424 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.224440 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.224462 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.224478 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:03Z","lastTransitionTime":"2025-11-22T07:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:03 crc kubenswrapper[4929]: E1122 07:12:03.237000 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.241126 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.241163 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.241182 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.241196 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.241219 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:03Z","lastTransitionTime":"2025-11-22T07:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.254446 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bb6rk" event={"ID":"763619b4-b584-4089-bd56-96823e22e25e","Type":"ContainerStarted","Data":"d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8"} Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.256235 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b"} Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.256278 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e"} Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.258441 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" event={"ID":"78d8d828-48aa-4499-a4af-54e0dd754349","Type":"ContainerStarted","Data":"3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75"} Nov 22 07:12:03 crc kubenswrapper[4929]: E1122 07:12:03.258821 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: E1122 07:12:03.258976 4929 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.260340 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.260371 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.260384 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.260401 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.260413 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:03Z","lastTransitionTime":"2025-11-22T07:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.273647 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc3582
5771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.287097 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.297968 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.308198 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.320890 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 
07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.336050 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.348907 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.359168 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.363284 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.363392 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.363410 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.363440 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.363458 4929 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:03Z","lastTransitionTime":"2025-11-22T07:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.373091 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apis
erver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.383915 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.395052 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.411243 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.421109 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.433882 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\
\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"po
dIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.457427 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646f
b68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.465824 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.465875 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.465888 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.465907 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.465921 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:03Z","lastTransitionTime":"2025-11-22T07:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.473799 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.485768 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.499103 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cn
i/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.509539 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.523115 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.533795 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.542845 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct 
envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.550412 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been 
read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.558555 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.568266 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.568296 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.568306 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.568320 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.568329 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:03Z","lastTransitionTime":"2025-11-22T07:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.569412 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.580772 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.599853 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\
":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.614478 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.625372 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.637801 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.652013 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.661637 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.670867 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.670923 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.670937 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.670958 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.670974 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:03Z","lastTransitionTime":"2025-11-22T07:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.674064 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.697256 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\
\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":f
alse,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.709026 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.746871 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z"
Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.773734 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.773782 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.773795 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.773809 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.773820 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:03Z","lastTransitionTime":"2025-11-22T07:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.875290 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.876037 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.876175 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.876330 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.876475 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:03Z","lastTransitionTime":"2025-11-22T07:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.880347 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs\") pod \"network-metrics-daemon-vmdgb\" (UID: \"42cb9248-6b5b-4970-8232-68883ec65710\") " pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:12:03 crc kubenswrapper[4929]: E1122 07:12:03.880529 4929 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 22 07:12:03 crc kubenswrapper[4929]: E1122 07:12:03.880606 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs podName:42cb9248-6b5b-4970-8232-68883ec65710 nodeName:}" failed. No retries permitted until 2025-11-22 07:12:05.880586279 +0000 UTC m=+62.990040322 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs") pod "network-metrics-daemon-vmdgb" (UID: "42cb9248-6b5b-4970-8232-68883ec65710") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.946768 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:12:03 crc kubenswrapper[4929]: E1122 07:12:03.946936 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710"
Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.961706 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.974985 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.978731 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.978868 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.978949 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.979036 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.979114 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:03Z","lastTransitionTime":"2025-11-22T07:12:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:03 crc kubenswrapper[4929]: I1122 07:12:03.986746 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.004869 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\
\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"nam
e\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.023123 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mou
ntPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\
\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.034009 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.044186 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.068703 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z"
Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.081312 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.081653 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.081666 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.081683 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.081694 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:04Z","lastTransitionTime":"2025-11-22T07:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.111202 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.151912 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.185539 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.185585 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.185598 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.185614 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.185630 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:04Z","lastTransitionTime":"2025-11-22T07:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.188518 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.231098 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.262850 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-pvjvh" event={"ID":"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416","Type":"ContainerStarted","Data":"7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153"} Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.271125 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 
07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.289586 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.289623 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.289639 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.289655 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.289665 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:04Z","lastTransitionTime":"2025-11-22T07:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.308126 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 
07:12:04.348418 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.391934 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics 
northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\
\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-
netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or 
is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.392843 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.392905 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.392920 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.392943 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.392959 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:04Z","lastTransitionTime":"2025-11-22T07:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.428322 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.466465 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.496064 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.496194 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.496234 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.496261 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.496275 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:04Z","lastTransitionTime":"2025-11-22T07:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.506568 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.550963 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.588555 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.598563 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.598596 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.598608 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.598623 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.598637 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:04Z","lastTransitionTime":"2025-11-22T07:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.631184 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.667143 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.700646 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.700683 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.700693 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.700708 4929 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.700722 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:04Z","lastTransitionTime":"2025-11-22T07:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.707344 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\
",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.749436 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.794833 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.803076 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.803127 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.803143 4929 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.803164 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.803180 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:04Z","lastTransitionTime":"2025-11-22T07:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.849033 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.871312 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.905163 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.905917 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.905940 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.905960 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.905979 4929 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:04Z","lastTransitionTime":"2025-11-22T07:12:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.907993 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.945541 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.946680 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.946708 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.946695 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:04 crc kubenswrapper[4929]: E1122 07:12:04.946862 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:04 crc kubenswrapper[4929]: E1122 07:12:04.947000 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:04 crc kubenswrapper[4929]: E1122 07:12:04.947136 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:12:04 crc kubenswrapper[4929]: I1122 07:12:04.987328 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":
\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.009400 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.009446 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.009461 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.009480 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.009497 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:05Z","lastTransitionTime":"2025-11-22T07:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.027057 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.071722 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\
\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\
\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.111518 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.111570 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.111588 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.111612 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.111630 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:05Z","lastTransitionTime":"2025-11-22T07:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.115814 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.146017 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.186266 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.214873 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.214931 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.214941 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.214954 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.214963 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:05Z","lastTransitionTime":"2025-11-22T07:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.268051 4929 generic.go:334] "Generic (PLEG): container finished" podID="78d8d828-48aa-4499-a4af-54e0dd754349" containerID="3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75" exitCode=0 Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.268155 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" event={"ID":"78d8d828-48aa-4499-a4af-54e0dd754349","Type":"ContainerDied","Data":"3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75"} Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.270156 4929 generic.go:334] "Generic (PLEG): container finished" podID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerID="fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2" exitCode=0 Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.270218 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerDied","Data":"fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2"} Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.285969 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc 
kubenswrapper[4929]: I1122 07:12:05.302594 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.317611 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.317641 4929 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.317652 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.317668 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.317679 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:05Z","lastTransitionTime":"2025-11-22T07:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.318280 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or 
is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.348519 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.393890 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d74
62\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.420594 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.420645 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.420653 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.420666 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.420677 4929 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:05Z","lastTransitionTime":"2025-11-22T07:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.429976 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.469042 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.511676 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.523660 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.523792 4929 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.523851 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.523919 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.523981 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:05Z","lastTransitionTime":"2025-11-22T07:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.548848 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.591237 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageI
D\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.625894 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.626567 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.626588 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.626597 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.626608 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.626617 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:05Z","lastTransitionTime":"2025-11-22T07:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.665442 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.705958 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.728307 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.728342 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.728353 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.728368 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.728378 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:05Z","lastTransitionTime":"2025-11-22T07:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.743281 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.16
8.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.785876 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"ima
geID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.827568 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container 
could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.830675 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.830715 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.830726 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.830742 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.830753 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:05Z","lastTransitionTime":"2025-11-22T07:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.867269 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.903858 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs\") pod \"network-metrics-daemon-vmdgb\" (UID: \"42cb9248-6b5b-4970-8232-68883ec65710\") " pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:05 crc kubenswrapper[4929]: E1122 07:12:05.904010 4929 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 07:12:05 crc kubenswrapper[4929]: E1122 07:12:05.904069 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs podName:42cb9248-6b5b-4970-8232-68883ec65710 nodeName:}" failed. No retries permitted until 2025-11-22 07:12:09.904053154 +0000 UTC m=+67.013507167 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs") pod "network-metrics-daemon-vmdgb" (UID: "42cb9248-6b5b-4970-8232-68883ec65710") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.904356 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.916535 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.933153 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.933188 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.933199 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.933234 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.933246 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:05Z","lastTransitionTime":"2025-11-22T07:12:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.947178 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:05 crc kubenswrapper[4929]: E1122 07:12:05.947349 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.947433 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"qua
y.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:05 crc kubenswrapper[4929]: I1122 07:12:05.988309 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:05Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.026359 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.035684 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.035738 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.035787 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.035805 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.035817 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:06Z","lastTransitionTime":"2025-11-22T07:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.067017 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.112176 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.137404 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.137456 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.137475 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.137500 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.137516 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:06Z","lastTransitionTime":"2025-11-22T07:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.148193 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.184699 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.224921 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 
07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.240349 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.240392 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.240406 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.240425 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.240438 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:06Z","lastTransitionTime":"2025-11-22T07:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.265984 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.274251 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerStarted","Data":"dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359"} Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.276006 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" event={"ID":"78d8d828-48aa-4499-a4af-54e0dd754349","Type":"ContainerStarted","Data":"9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f"} Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.307988 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\
\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.342755 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.342836 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.342860 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.342890 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.342914 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:06Z","lastTransitionTime":"2025-11-22T07:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.346375 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.391949 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.429805 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.445329 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.445368 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.445380 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.445393 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.445402 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:06Z","lastTransitionTime":"2025-11-22T07:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.475279 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\
\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.507402 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.548162 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.548196 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.548231 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.548248 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.548259 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:06Z","lastTransitionTime":"2025-11-22T07:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.556935 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.589485 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.628096 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.650451 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.650503 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.650517 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.650534 4929 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeNotReady" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.650547 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:06Z","lastTransitionTime":"2025-11-22T07:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.668871 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.710366 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.747543 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.753637 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.753708 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.753732 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.753759 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.753784 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:06Z","lastTransitionTime":"2025-11-22T07:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.787050 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.833315 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.855751 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.855790 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.855811 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.855830 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.855842 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:06Z","lastTransitionTime":"2025-11-22T07:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.869419 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.903900 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.947175 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.947258 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:06 crc kubenswrapper[4929]: E1122 07:12:06.947456 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.947478 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:06 crc kubenswrapper[4929]: E1122 07:12:06.948344 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:06 crc kubenswrapper[4929]: E1122 07:12:06.948404 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.952411 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77325
7453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.958366 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.958420 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.958436 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.958457 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.958474 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:06Z","lastTransitionTime":"2025-11-22T07:12:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:06 crc kubenswrapper[4929]: I1122 07:12:06.987520 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:06Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.029459 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.061196 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.061247 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.061257 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.061271 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.061281 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:07Z","lastTransitionTime":"2025-11-22T07:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.078424 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7
d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.111456 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f528
6b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] 
Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.150749 4929 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.163653 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.163707 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.163728 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.163753 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.163773 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:07Z","lastTransitionTime":"2025-11-22T07:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.189094 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.238460 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.264532 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.265969 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.265995 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.266004 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.266016 4929 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeNotReady" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.266025 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:07Z","lastTransitionTime":"2025-11-22T07:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.282108 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerStarted","Data":"146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12"} Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.286108 4929 generic.go:334] "Generic (PLEG): container finished" podID="78d8d828-48aa-4499-a4af-54e0dd754349" containerID="9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f" exitCode=0 Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.286170 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" event={"ID":"78d8d828-48aa-4499-a4af-54e0dd754349","Type":"ContainerDied","Data":"9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f"} Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.308596 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.356031 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageI
D\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.369035 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.369097 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.369109 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.369146 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.369159 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:07Z","lastTransitionTime":"2025-11-22T07:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.385029 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.436695 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.466646 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\
\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.471593 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.471633 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.471645 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.471661 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.471672 4929 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:07Z","lastTransitionTime":"2025-11-22T07:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.506291 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.549202 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.574319 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.574368 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.574384 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.574407 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.574423 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:07Z","lastTransitionTime":"2025-11-22T07:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.587787 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.625852 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.667939 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev
@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.676852 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.676884 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.676893 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.676925 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.676935 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:07Z","lastTransitionTime":"2025-11-22T07:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.714738 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.748096 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.780323 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.780379 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.780392 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.780409 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.780422 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:07Z","lastTransitionTime":"2025-11-22T07:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.785665 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.829521 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.869709 4929 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc338
6310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.882517 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.882817 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.883022 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.883180 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.883381 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:07Z","lastTransitionTime":"2025-11-22T07:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.908030 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.946577 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:07 crc kubenswrapper[4929]: E1122 07:12:07.946728 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.953126 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.986287 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.986349 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.986366 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.986390 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.986407 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:07Z","lastTransitionTime":"2025-11-22T07:12:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:07 crc kubenswrapper[4929]: I1122 07:12:07.987996 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:07Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.026684 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startT
ime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:08Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.071468 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:08Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.095299 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.095364 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.095378 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.095400 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.095417 4929 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:08Z","lastTransitionTime":"2025-11-22T07:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.198067 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.198481 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.198504 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.198526 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.198542 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:08Z","lastTransitionTime":"2025-11-22T07:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.301641 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.301700 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.301713 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.301732 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.301744 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:08Z","lastTransitionTime":"2025-11-22T07:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.404298 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.404336 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.404347 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.404363 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.404378 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:08Z","lastTransitionTime":"2025-11-22T07:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.506985 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.507024 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.507034 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.507048 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.507060 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:08Z","lastTransitionTime":"2025-11-22T07:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.610010 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.610083 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.610099 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.610122 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.610139 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:08Z","lastTransitionTime":"2025-11-22T07:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.712239 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.712572 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.712585 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.712603 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.712619 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:08Z","lastTransitionTime":"2025-11-22T07:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.815384 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.815447 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.815463 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.815489 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.815512 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:08Z","lastTransitionTime":"2025-11-22T07:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.917910 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.917966 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.917984 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.918011 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.918034 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:08Z","lastTransitionTime":"2025-11-22T07:12:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.947022 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.947087 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:12:08 crc kubenswrapper[4929]: E1122 07:12:08.947263 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:12:08 crc kubenswrapper[4929]: I1122 07:12:08.947321 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:12:08 crc kubenswrapper[4929]: E1122 07:12:08.947418 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:12:08 crc kubenswrapper[4929]: E1122 07:12:08.947494 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
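Every NotReady heartbeat and "Error syncing pod" entry in this section reports the same condition: no CNI configuration file under /etc/kubernetes/cni/net.d/. A small Go sketch of the equivalent directory check (the path is the one named in the message; the accepted extensions are an assumption based on common CNI loader behavior, not the runtime's exact logic):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Directory named in the kubelet error message.
	confDir := "/etc/kubernetes/cni/net.d"
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Println("cannot read CNI conf dir:", err)
		return
	}
	found := 0
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // extensions CNI loaders typically accept
			fmt.Println("CNI config:", filepath.Join(confDir, e.Name()))
			found++
		}
	}
	if found == 0 {
		fmt.Println("no CNI configuration files; the network plugin has not written its config yet")
	}
}

An empty result here matches the log's state: the node stays NotReady until the network provider (OVN-Kubernetes, starting in the entries below) writes its configuration.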
[... the same node-status heartbeat block repeats unchanged, timestamps 07:12:09.020 through 07:12:09.846 ...]
Nov 22 07:12:09 crc kubenswrapper[4929]: I1122 07:12:09.946288 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs\") pod \"network-metrics-daemon-vmdgb\" (UID: \"42cb9248-6b5b-4970-8232-68883ec65710\") " pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:12:09 crc kubenswrapper[4929]: E1122 07:12:09.946463 4929 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 22 07:12:09 crc kubenswrapper[4929]: E1122 07:12:09.946525 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs podName:42cb9248-6b5b-4970-8232-68883ec65710 nodeName:}" failed. No retries permitted until 2025-11-22 07:12:17.946508677 +0000 UTC m=+75.055962690 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs") pod "network-metrics-daemon-vmdgb" (UID: "42cb9248-6b5b-4970-8232-68883ec65710") : object "openshift-multus"/"metrics-daemon-secret" not registered
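The nestedpendingoperations entry above defers the next mount attempt by 8 s (durationBeforeRetry 8s), the shape of an exponential backoff: 8 s is what an initial 500 ms delay becomes after four doublings. A sketch of that schedule (the initial delay, factor, and cap here are illustrative assumptions, not constants copied from the kubelet source):

package main

import (
	"fmt"
	"time"
)

// nextBackoff doubles the delay up to a cap: the shape of retry scheduling
// used for failed volume operations. The constants in main are illustrative.
func nextBackoff(d, maxDelay time.Duration) time.Duration {
	d *= 2
	if d > maxDelay {
		return maxDelay
	}
	return d
}

func main() {
	delay := 500 * time.Millisecond
	maxDelay := 2*time.Minute + 2*time.Second
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d: next retry in %v\n", attempt, delay)
		delay = nextBackoff(delay, maxDelay)
	}
}

Under these assumed constants the fifth attempt waits 8 s, consistent with the retry window recorded in the log.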
Nov 22 07:12:09 crc kubenswrapper[4929]: I1122 07:12:09.946708 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:12:09 crc kubenswrapper[4929]: E1122 07:12:09.946911 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710"
[... the same node-status heartbeat block repeats unchanged, timestamps 07:12:09.948 through 07:12:10.256 ...]
Nov 22 07:12:10 crc kubenswrapper[4929]: I1122 07:12:10.301445 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" event={"ID":"78d8d828-48aa-4499-a4af-54e0dd754349","Type":"ContainerStarted","Data":"edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6"}
Nov 22 07:12:10 crc kubenswrapper[4929]: I1122 07:12:10.305240 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerStarted","Data":"25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c"}
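The two SyncLoop (PLEG) entries above record container lifecycle transitions for the multus and ovnkube-node pods. A throwaway Go sketch that extracts just these transitions from a kubelet log on stdin (the regular expression is hand-written for the format seen here, not an official parser):

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	// Matches lines like:
	//   ... kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="ns/name" event={..."Type":"ContainerStarted"...}
	re := regexp.MustCompile(`"SyncLoop \(PLEG\): event for pod" pod="([^"]+)" event=\{.*?"Type":"([A-Za-z]+)"`)
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // kubelet lines can be very long
	for sc.Scan() {
		if m := re.FindStringSubmatch(sc.Text()); m != nil {
			fmt.Printf("%-60s %s\n", m[1], m[2])
		}
	}
}

Fed this log, it would print one line per transition, e.g. openshift-multus/multus-additional-cni-plugins-mtn8z followed by ContainerStarted and, a second later, ContainerDied.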
[... the same node-status heartbeat block repeats unchanged, timestamps 07:12:10.359 through 07:12:10.871 ...]
Nov 22 07:12:10 crc kubenswrapper[4929]: I1122 07:12:10.946406 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:12:10 crc kubenswrapper[4929]: I1122 07:12:10.946475 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:12:10 crc kubenswrapper[4929]: I1122 07:12:10.946423 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:12:10 crc kubenswrapper[4929]: E1122 07:12:10.946616 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:12:10 crc kubenswrapper[4929]: E1122 07:12:10.946713 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:12:10 crc kubenswrapper[4929]: E1122 07:12:10.946880 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:10 crc kubenswrapper[4929]: I1122 07:12:10.975321 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:10 crc kubenswrapper[4929]: I1122 07:12:10.975389 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:10 crc kubenswrapper[4929]: I1122 07:12:10.975411 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:10 crc kubenswrapper[4929]: I1122 07:12:10.975440 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:10 crc kubenswrapper[4929]: I1122 07:12:10.975470 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:10Z","lastTransitionTime":"2025-11-22T07:12:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:11 crc kubenswrapper[4929]: I1122 07:12:11.078273 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:11 crc kubenswrapper[4929]: I1122 07:12:11.078330 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:11 crc kubenswrapper[4929]: I1122 07:12:11.078347 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:11 crc kubenswrapper[4929]: I1122 07:12:11.078374 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:11 crc kubenswrapper[4929]: I1122 07:12:11.078392 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:11Z","lastTransitionTime":"2025-11-22T07:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:11 crc kubenswrapper[4929]: I1122 07:12:11.181421 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:11 crc kubenswrapper[4929]: I1122 07:12:11.181477 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:11 crc kubenswrapper[4929]: I1122 07:12:11.181494 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:11 crc kubenswrapper[4929]: I1122 07:12:11.181517 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:11 crc kubenswrapper[4929]: I1122 07:12:11.181534 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:11Z","lastTransitionTime":"2025-11-22T07:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:11 crc kubenswrapper[4929]: I1122 07:12:11.284120 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:11 crc kubenswrapper[4929]: I1122 07:12:11.284157 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:11 crc kubenswrapper[4929]: I1122 07:12:11.284165 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:11 crc kubenswrapper[4929]: I1122 07:12:11.284178 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:11 crc kubenswrapper[4929]: I1122 07:12:11.284186 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:11Z","lastTransitionTime":"2025-11-22T07:12:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 22 07:12:11 crc kubenswrapper[4929]: I1122 07:12:11.310388 4929 generic.go:334] "Generic (PLEG): container finished" podID="78d8d828-48aa-4499-a4af-54e0dd754349" containerID="edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6" exitCode=0
Nov 22 07:12:11 crc kubenswrapper[4929]: I1122 07:12:11.310438 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" event={"ID":"78d8d828-48aa-4499-a4af-54e0dd754349","Type":"ContainerDied","Data":"edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6"}
[... the same node-status heartbeat block repeats unchanged, timestamps 07:12:11.387 through 07:12:11.903 ...]
Nov 22 07:12:11 crc kubenswrapper[4929]: I1122 07:12:11.946685 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:12:11 crc kubenswrapper[4929]: E1122 07:12:11.946924 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710"
[... the same node-status heartbeat block repeats unchanged, timestamps 07:12:12.006 through 07:12:12.316 ...]
Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.320360 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerStarted","Data":"86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2"}
Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.340180 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:12Z is after 2025-08-24T17:21:41Z"
Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.358050 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\
\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:12Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.376472 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:12Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.388009 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:12Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.401390 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:12Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.414849 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126
.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:12Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.419071 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.419125 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.419137 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.419155 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.419167 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:12Z","lastTransitionTime":"2025-11-22T07:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.431632 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:12Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.443349 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:12Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.458608 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:12Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.473378 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-
kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:12Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.486846 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:12Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.497980 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:12Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.512277 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:12Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.521343 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.521393 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.521411 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.521442 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.521469 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:12Z","lastTransitionTime":"2025-11-22T07:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.525248 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:12Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.540908 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\
\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":f
alse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:12Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.559979 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:12Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.569808 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:12Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.580661 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:12Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.624007 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.624054 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.624067 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.624087 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.624100 4929 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:12Z","lastTransitionTime":"2025-11-22T07:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.727612 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.727679 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.727698 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.727723 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.727744 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:12Z","lastTransitionTime":"2025-11-22T07:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.830848 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.830899 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.830915 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.830931 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.830944 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:12Z","lastTransitionTime":"2025-11-22T07:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.934063 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.934109 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.934119 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.934135 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.934146 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:12Z","lastTransitionTime":"2025-11-22T07:12:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.946498 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.946552 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:12 crc kubenswrapper[4929]: I1122 07:12:12.946498 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:12 crc kubenswrapper[4929]: E1122 07:12:12.946690 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:12 crc kubenswrapper[4929]: E1122 07:12:12.946815 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:12 crc kubenswrapper[4929]: E1122 07:12:12.946913 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.037334 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.037439 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.037461 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.037489 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.037508 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:13Z","lastTransitionTime":"2025-11-22T07:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.139493 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.139529 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.139538 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.139562 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.139571 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:13Z","lastTransitionTime":"2025-11-22T07:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.241992 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.242058 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.242069 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.242089 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.242101 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:13Z","lastTransitionTime":"2025-11-22T07:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.325906 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" event={"ID":"78d8d828-48aa-4499-a4af-54e0dd754349","Type":"ContainerStarted","Data":"e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522"} Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.329117 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerStarted","Data":"1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6"} Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.344862 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.344901 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.344918 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.344934 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.344945 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:13Z","lastTransitionTime":"2025-11-22T07:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.448291 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.448363 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.448378 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.448397 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.448411 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:13Z","lastTransitionTime":"2025-11-22T07:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.550595 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.550648 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.550688 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.550711 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.550725 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:13Z","lastTransitionTime":"2025-11-22T07:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.552648 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.552704 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.552717 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.552730 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.552741 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:13Z","lastTransitionTime":"2025-11-22T07:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:13 crc kubenswrapper[4929]: E1122 07:12:13.568717 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:13Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.576715 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.576791 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.576812 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.576837 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.576859 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:13Z","lastTransitionTime":"2025-11-22T07:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:13 crc kubenswrapper[4929]: E1122 07:12:13.593585 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:13Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.597358 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.597383 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.597391 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.597405 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.597414 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:13Z","lastTransitionTime":"2025-11-22T07:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:13 crc kubenswrapper[4929]: E1122 07:12:13.609716 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:13Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.615413 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.615450 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
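Every retry above dies in the TLS handshake before the patch is even examined: the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 serves a certificate whose notAfter (2025-08-24T17:21:41Z) is months behind the node clock (2025-11-22). A minimal sketch that reproduces the kubelet-side check from the node itself, assuming only that Python 3 is available on the host; the host, port, and expected error text all come from the entries above:

import socket
import ssl

HOST, PORT = "127.0.0.1", 9743  # webhook endpoint taken from the log lines above

# Verify the serving certificate the way a validating TLS client would.
ctx = ssl.create_default_context()
ctx.check_hostname = False  # the failure above is validity, not hostname matching
# NOTE: against an internal CA, load the cluster CA bundle first, e.g.
# ctx.load_verify_locations("/path/to/ca-bundle.crt"), or an unknown-issuer
# error will mask the expiry check.

try:
    with socket.create_connection((HOST, PORT), timeout=10) as sock:
        with ctx.wrap_socket(sock) as tls:
            print("handshake OK; peer certificate notAfter:", tls.getpeercert()["notAfter"])
except ssl.SSLCertVerificationError as err:
    # Expected while the cert is expired: verify_message reads
    # "certificate has expired", matching the x509 error recorded above.
    print("verification failed:", err.verify_message)

If the handshake succeeds, the serving certificate has been rotated and the remaining failures in this log are stale.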
event="NodeHasNoDiskPressure" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.615459 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.615471 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.615481 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:13Z","lastTransitionTime":"2025-11-22T07:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:13 crc kubenswrapper[4929]: E1122 07:12:13.632377 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:13Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.637626 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.637665 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
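Each failed attempt re-serializes the node's full image list, so the same multi-kilobyte payload recurs on every retry; the elisions above keep one full copy. When triaging a kubelet log of this shape, it helps to collapse the stream to the distinct NotReady diagnoses. A small sketch of such a helper, assuming the log is fed on stdin; the marker string and JSON field names are taken from the "Node became not ready" entries themselves:

import json
import re
import sys

# "Node became not ready" entries carry the Ready condition as inline JSON;
# this pattern is lifted from the log lines above.
PATTERN = re.compile(r'"Node became not ready" node="(?P<node>[^"]+)" condition=(?P<cond>\{.*?\})')

def not_ready_conditions(stream):
    for line in stream:
        for match in PATTERN.finditer(line):
            yield match.group("node"), json.loads(match.group("cond"))

if __name__ == "__main__":
    seen = set()
    for node, cond in not_ready_conditions(sys.stdin):
        key = (node, cond.get("reason"), cond.get("message"))
        if key not in seen:  # print each distinct diagnosis once
            seen.add(key)
            print(f"{node}: {cond.get('reason')}: {cond.get('message')}")

Run as "python3 triage.py < kubelet.log"; fed this log it prints a single line for node crc blaming the missing CNI configuration.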
event="NodeHasNoDiskPressure" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.637676 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.637692 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.637705 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:13Z","lastTransitionTime":"2025-11-22T07:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:13 crc kubenswrapper[4929]: E1122 07:12:13.658068 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:13Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:13 crc kubenswrapper[4929]: E1122 07:12:13.658353 4929 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.660255 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
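The retry loop ends with "update node status exceeds retry count", but the webhook certificate is only half the story: the Ready condition itself blames a missing CNI config under /etc/kubernetes/cni/net.d/. A sketch of the matching on-node check, assuming it runs directly on the node with read access to that directory; the path comes verbatim from the kubelet message, while the filename filter is a guess at common CNI config extensions:

import os

CNI_DIR = "/etc/kubernetes/cni/net.d/"  # directory named verbatim in the kubelet message

try:
    # .conf/.conflist/.json are the usual CNI config names; the filter is an assumption.
    configs = [name for name in os.listdir(CNI_DIR)
               if name.endswith((".conf", ".conflist", ".json"))]
except FileNotFoundError:
    configs = []

if configs:
    print("CNI configuration present:", ", ".join(sorted(configs)))
else:
    print("no CNI configuration under", CNI_DIR, "- the network plugin has not written one yet")

An empty result here is consistent with the NetworkPluginNotReady message repeated throughout this log: the network operator has not (or cannot) write its config, so the node stays NotReady regardless of the status-patch failures.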
event="NodeHasSufficientMemory" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.660308 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.660326 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.660348 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.660366 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:13Z","lastTransitionTime":"2025-11-22T07:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.763585 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.763647 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.763664 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.763688 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.763707 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:13Z","lastTransitionTime":"2025-11-22T07:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.866907 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.866993 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.867244 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.867283 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.867306 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:13Z","lastTransitionTime":"2025-11-22T07:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.947130 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:13 crc kubenswrapper[4929]: E1122 07:12:13.947443 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.967745 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:13Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.970002 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.970064 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.970077 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.970100 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.970117 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:13Z","lastTransitionTime":"2025-11-22T07:12:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.979923 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]
}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:13Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:13 crc kubenswrapper[4929]: I1122 07:12:13.994735 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-override
s\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:13Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.011667 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.025516 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.052157 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller 
ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath
\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin
\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.070343 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-
resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.071941 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.071977 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.071989 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.072007 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.072019 4929 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:14Z","lastTransitionTime":"2025-11-22T07:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.082588 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.099343 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.116548 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.127300 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\
\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.145061 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.167511 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"qu
ay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c687744
1ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.174197 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.174251 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.174262 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.174277 4929 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.174288 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:14Z","lastTransitionTime":"2025-11-22T07:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.181722 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.192321 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.204518 4929 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.215892 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.236498 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.276427 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.276646 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.276711 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.276782 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.276855 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:14Z","lastTransitionTime":"2025-11-22T07:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.336273 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerStarted","Data":"04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f"} Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.351876 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.366328 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.377480 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.378905 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.379009 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.379081 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.379147 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.379221 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:14Z","lastTransitionTime":"2025-11-22T07:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.389651 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" 
for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.403038 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.415535 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.429539 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.449640 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z 
is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.463821 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"k
ube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.482055 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.482098 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.482108 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.482123 4929 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.482138 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:14Z","lastTransitionTime":"2025-11-22T07:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.484795 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30
e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.500570 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.517032 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.533160 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.547742 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\
\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.561178 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-oper
ator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.573334 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.585413 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.585467 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.585481 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.585505 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.585520 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:14Z","lastTransitionTime":"2025-11-22T07:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.589044 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.603917 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:14Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.688503 4929 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.688545 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.688557 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.688574 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.688586 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:14Z","lastTransitionTime":"2025-11-22T07:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.699201 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:12:14 crc kubenswrapper[4929]: E1122 07:12:14.699421 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:12:46.699390091 +0000 UTC m=+103.808844144 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.699484 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.699589 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:14 crc kubenswrapper[4929]: E1122 07:12:14.699632 4929 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 07:12:14 crc kubenswrapper[4929]: E1122 07:12:14.699716 4929 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 07:12:14 crc kubenswrapper[4929]: E1122 07:12:14.699738 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 07:12:46.699714439 +0000 UTC m=+103.809168462 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 07:12:14 crc kubenswrapper[4929]: E1122 07:12:14.699767 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 07:12:46.69975501 +0000 UTC m=+103.809209033 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.791772 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.791823 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.791837 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.791884 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.791903 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:14Z","lastTransitionTime":"2025-11-22T07:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.800554 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.800708 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:14 crc kubenswrapper[4929]: E1122 07:12:14.800799 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 07:12:14 crc kubenswrapper[4929]: E1122 07:12:14.800829 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 07:12:14 crc kubenswrapper[4929]: E1122 07:12:14.800841 4929 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 07:12:14 crc kubenswrapper[4929]: E1122 07:12:14.800898 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2025-11-22 07:12:46.800882509 +0000 UTC m=+103.910336522 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 07:12:14 crc kubenswrapper[4929]: E1122 07:12:14.800931 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 07:12:14 crc kubenswrapper[4929]: E1122 07:12:14.800970 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 07:12:14 crc kubenswrapper[4929]: E1122 07:12:14.800995 4929 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 07:12:14 crc kubenswrapper[4929]: E1122 07:12:14.801099 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 07:12:46.801068153 +0000 UTC m=+103.910522206 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.895687 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.895745 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.895761 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.895781 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.895796 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:14Z","lastTransitionTime":"2025-11-22T07:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.947183 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.947273 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.947304 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:14 crc kubenswrapper[4929]: E1122 07:12:14.947382 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:14 crc kubenswrapper[4929]: E1122 07:12:14.947546 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:12:14 crc kubenswrapper[4929]: E1122 07:12:14.947671 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.998012 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.998055 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.998068 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.998086 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:14 crc kubenswrapper[4929]: I1122 07:12:14.998098 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:14Z","lastTransitionTime":"2025-11-22T07:12:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.100200 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.100271 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.100284 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.100299 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.100310 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:15Z","lastTransitionTime":"2025-11-22T07:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.203109 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.203179 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.203203 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.203269 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.203293 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:15Z","lastTransitionTime":"2025-11-22T07:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.305483 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.305565 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.305590 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.305622 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.305644 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:15Z","lastTransitionTime":"2025-11-22T07:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.407782 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.407845 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.407855 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.407867 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.407875 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:15Z","lastTransitionTime":"2025-11-22T07:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.510017 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.510059 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.510068 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.510080 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.510091 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:15Z","lastTransitionTime":"2025-11-22T07:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.613112 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.613163 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.613176 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.613192 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.613225 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:15Z","lastTransitionTime":"2025-11-22T07:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.715954 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.716014 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.716025 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.716040 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.716051 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:15Z","lastTransitionTime":"2025-11-22T07:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.818270 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.818313 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.818324 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.818339 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.818350 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:15Z","lastTransitionTime":"2025-11-22T07:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.920459 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.920523 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.920541 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.920567 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.920586 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:15Z","lastTransitionTime":"2025-11-22T07:12:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:15 crc kubenswrapper[4929]: I1122 07:12:15.946792 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:15 crc kubenswrapper[4929]: E1122 07:12:15.946921 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.023555 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.023633 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.023661 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.023689 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.023709 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:16Z","lastTransitionTime":"2025-11-22T07:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.127043 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.127105 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.127121 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.127147 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.127165 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:16Z","lastTransitionTime":"2025-11-22T07:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.229814 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.229861 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.229876 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.229893 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.229905 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:16Z","lastTransitionTime":"2025-11-22T07:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.333037 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.333101 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.333116 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.333136 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.333148 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:16Z","lastTransitionTime":"2025-11-22T07:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.434895 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.434948 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.434964 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.434982 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.434994 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:16Z","lastTransitionTime":"2025-11-22T07:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.537236 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.537293 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.537302 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.537314 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.537322 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:16Z","lastTransitionTime":"2025-11-22T07:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.639683 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.639738 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.639754 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.639778 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.639798 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:16Z","lastTransitionTime":"2025-11-22T07:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.742831 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.742909 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.742931 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.742957 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.742973 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:16Z","lastTransitionTime":"2025-11-22T07:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.845251 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.845298 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.845307 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.845321 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.845330 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:16Z","lastTransitionTime":"2025-11-22T07:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.946556 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.946571 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.946678 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:16 crc kubenswrapper[4929]: E1122 07:12:16.946825 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:16 crc kubenswrapper[4929]: E1122 07:12:16.946681 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:12:16 crc kubenswrapper[4929]: E1122 07:12:16.946863 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.948100 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.948134 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.948143 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.948178 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:16 crc kubenswrapper[4929]: I1122 07:12:16.948187 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:16Z","lastTransitionTime":"2025-11-22T07:12:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.050547 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.050615 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.050630 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.050646 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.050702 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:17Z","lastTransitionTime":"2025-11-22T07:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.155190 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.155247 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.155259 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.155302 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.155316 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:17Z","lastTransitionTime":"2025-11-22T07:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.262076 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.262134 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.262152 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.262172 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.262188 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:17Z","lastTransitionTime":"2025-11-22T07:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.352943 4929 generic.go:334] "Generic (PLEG): container finished" podID="78d8d828-48aa-4499-a4af-54e0dd754349" containerID="e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522" exitCode=0 Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.352980 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" event={"ID":"78d8d828-48aa-4499-a4af-54e0dd754349","Type":"ContainerDied","Data":"e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522"} Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.364105 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.364136 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.364146 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.364161 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.364171 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:17Z","lastTransitionTime":"2025-11-22T07:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.373946 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:17Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.393657 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:17Z 
is after 2025-08-24T17:21:41Z" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.409334 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernet
es/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:17Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.424807 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:17Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.437177 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:17Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.450031 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:17Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.459919 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\
\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:17Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.466413 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.466442 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.466451 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.466463 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.466471 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:17Z","lastTransitionTime":"2025-11-22T07:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.475703 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:17Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.494714 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:17Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.506792 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:17Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.517101 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:17Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.526583 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-22T07:12:17Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.537626 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-re
sources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:17Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.550554 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:17Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.561732 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:17Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.568665 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.568704 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.568719 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.568737 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.568746 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:17Z","lastTransitionTime":"2025-11-22T07:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.571909 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:17Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.585882 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:17Z is after 2025-08-24T17:21:41Z" Nov 22 
07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.600071 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:17Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.671596 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.671661 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.671679 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.671708 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.671725 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:17Z","lastTransitionTime":"2025-11-22T07:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.777159 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.777262 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.777286 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.777318 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.777341 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:17Z","lastTransitionTime":"2025-11-22T07:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.879856 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.879890 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.879901 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.879917 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.879928 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:17Z","lastTransitionTime":"2025-11-22T07:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.947000 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:17 crc kubenswrapper[4929]: E1122 07:12:17.947152 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.981921 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.981969 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.981980 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.981999 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:17 crc kubenswrapper[4929]: I1122 07:12:17.982012 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:17Z","lastTransitionTime":"2025-11-22T07:12:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.034861 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs\") pod \"network-metrics-daemon-vmdgb\" (UID: \"42cb9248-6b5b-4970-8232-68883ec65710\") " pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:18 crc kubenswrapper[4929]: E1122 07:12:18.035154 4929 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 07:12:18 crc kubenswrapper[4929]: E1122 07:12:18.035286 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs podName:42cb9248-6b5b-4970-8232-68883ec65710 nodeName:}" failed. No retries permitted until 2025-11-22 07:12:34.035259824 +0000 UTC m=+91.144713877 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs") pod "network-metrics-daemon-vmdgb" (UID: "42cb9248-6b5b-4970-8232-68883ec65710") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.083992 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.084043 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.084055 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.084075 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.084090 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:18Z","lastTransitionTime":"2025-11-22T07:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.186480 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.186522 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.186536 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.186551 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.186562 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:18Z","lastTransitionTime":"2025-11-22T07:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.289743 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.289780 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.289789 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.289804 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.289815 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:18Z","lastTransitionTime":"2025-11-22T07:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.360563 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerStarted","Data":"144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892"} Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.392285 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.392334 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.392346 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.392364 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.392377 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:18Z","lastTransitionTime":"2025-11-22T07:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.494790 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.494860 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.494883 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.494916 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.494940 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:18Z","lastTransitionTime":"2025-11-22T07:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.597174 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.597237 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.597249 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.597269 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.597293 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:18Z","lastTransitionTime":"2025-11-22T07:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.699968 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.700007 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.700018 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.700033 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.700070 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:18Z","lastTransitionTime":"2025-11-22T07:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.801836 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.801881 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.801894 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.801911 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.801923 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:18Z","lastTransitionTime":"2025-11-22T07:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.903874 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.903939 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.903956 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.903979 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.903998 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:18Z","lastTransitionTime":"2025-11-22T07:12:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.946500 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.946513 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:18 crc kubenswrapper[4929]: E1122 07:12:18.946659 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:18 crc kubenswrapper[4929]: I1122 07:12:18.946509 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:18 crc kubenswrapper[4929]: E1122 07:12:18.946841 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:12:18 crc kubenswrapper[4929]: E1122 07:12:18.946968 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.006543 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.006586 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.006597 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.006611 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.006624 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:19Z","lastTransitionTime":"2025-11-22T07:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.108978 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.109017 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.109027 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.109045 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.109056 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:19Z","lastTransitionTime":"2025-11-22T07:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.212500 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.212550 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.212561 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.212578 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.212589 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:19Z","lastTransitionTime":"2025-11-22T07:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.315269 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.315315 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.315327 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.315342 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.315353 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:19Z","lastTransitionTime":"2025-11-22T07:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.417891 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.417947 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.417967 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.417992 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.418013 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:19Z","lastTransitionTime":"2025-11-22T07:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.520686 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.520724 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.520734 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.520752 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.520765 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:19Z","lastTransitionTime":"2025-11-22T07:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.622662 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.622728 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.622748 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.622772 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.622789 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:19Z","lastTransitionTime":"2025-11-22T07:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.724150 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.724192 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.724201 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.724233 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.724245 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:19Z","lastTransitionTime":"2025-11-22T07:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.826714 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.826782 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.826804 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.826831 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.826856 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:19Z","lastTransitionTime":"2025-11-22T07:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.929287 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.929326 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.929337 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.929354 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.929368 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:19Z","lastTransitionTime":"2025-11-22T07:12:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:19 crc kubenswrapper[4929]: I1122 07:12:19.946860 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:19 crc kubenswrapper[4929]: E1122 07:12:19.947000 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.031560 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.031598 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.031606 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.031623 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.031631 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:20Z","lastTransitionTime":"2025-11-22T07:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.134349 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.134423 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.134456 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.134485 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.134508 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:20Z","lastTransitionTime":"2025-11-22T07:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.236555 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.236586 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.236594 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.236614 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.236624 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:20Z","lastTransitionTime":"2025-11-22T07:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.339808 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.339845 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.339856 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.339872 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.339883 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:20Z","lastTransitionTime":"2025-11-22T07:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.378969 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" event={"ID":"78d8d828-48aa-4499-a4af-54e0dd754349","Type":"ContainerStarted","Data":"f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda"}
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.441924 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.441956 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.441963 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.441977 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.441986 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:20Z","lastTransitionTime":"2025-11-22T07:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.544724 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.544774 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.544791 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.544814 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.544831 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:20Z","lastTransitionTime":"2025-11-22T07:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.646724 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.646773 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.646785 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.646803 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.646815 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:20Z","lastTransitionTime":"2025-11-22T07:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.749027 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.749067 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.749077 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.749093 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.749103 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:20Z","lastTransitionTime":"2025-11-22T07:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.851384 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.851440 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.851458 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.851479 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.851497 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:20Z","lastTransitionTime":"2025-11-22T07:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.947184 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.947200 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:12:20 crc kubenswrapper[4929]: E1122 07:12:20.947326 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.947370 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:12:20 crc kubenswrapper[4929]: E1122 07:12:20.947538 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:12:20 crc kubenswrapper[4929]: E1122 07:12:20.947656 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.953920 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.953954 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.953966 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.953979 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:20 crc kubenswrapper[4929]: I1122 07:12:20.953990 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:20Z","lastTransitionTime":"2025-11-22T07:12:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.056054 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.056099 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.056109 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.056125 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.056136 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:21Z","lastTransitionTime":"2025-11-22T07:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.158244 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.158318 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.158340 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.158369 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.158391 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:21Z","lastTransitionTime":"2025-11-22T07:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.261169 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.261229 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.261242 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.261258 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.261268 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:21Z","lastTransitionTime":"2025-11-22T07:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.363096 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.363129 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.363157 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.363171 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.363179 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:21Z","lastTransitionTime":"2025-11-22T07:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.399726 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:21Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.416594 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:21Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.431074 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:21Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.441902 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:21Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.458250 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:21Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.465310 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.465350 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.465363 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.465382 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.465397 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:21Z","lastTransitionTime":"2025-11-22T07:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.468840 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.16
8.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:21Z is after 2025-08-24T17:21:41Z"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.479890 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"ima
geID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:21Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.490015 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container 
could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:21Z is after 2025-08-24T17:21:41Z"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.499840 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:21Z is after 2025-08-24T17:21:41Z"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.508236 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:21Z is after 2025-08-24T17:21:41Z"
Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.520028 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:21Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.533667 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:21Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.545123 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:21Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.555151 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:21Z is after 2025-08-24T17:21:41Z" Nov 22 
07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.567722 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.567757 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.567766 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.567779 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.567787 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:21Z","lastTransitionTime":"2025-11-22T07:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.567793 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d
7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 
07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:21Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.577087 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:21Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.588957 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:21Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.608326 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:21Z 
is after 2025-08-24T17:21:41Z" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.670983 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.671038 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.671049 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.671063 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.671071 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:21Z","lastTransitionTime":"2025-11-22T07:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.773226 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.773252 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.773260 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.773273 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.773281 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:21Z","lastTransitionTime":"2025-11-22T07:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.875025 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.875063 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.875074 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.875088 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.875099 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:21Z","lastTransitionTime":"2025-11-22T07:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.946682 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:21 crc kubenswrapper[4929]: E1122 07:12:21.946815 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.962666 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.976971 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.977001 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.977012 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.977028 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:21 crc kubenswrapper[4929]: I1122 07:12:21.977040 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:21Z","lastTransitionTime":"2025-11-22T07:12:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.081154 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.081185 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.081195 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.081226 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.081239 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:22Z","lastTransitionTime":"2025-11-22T07:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.183170 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.183204 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.183239 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.183255 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.183265 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:22Z","lastTransitionTime":"2025-11-22T07:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.285203 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.285266 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.285279 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.285294 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.285304 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:22Z","lastTransitionTime":"2025-11-22T07:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.387114 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.387172 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.387185 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.387201 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.387225 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:22Z","lastTransitionTime":"2025-11-22T07:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.388029 4929 generic.go:334] "Generic (PLEG): container finished" podID="78d8d828-48aa-4499-a4af-54e0dd754349" containerID="f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda" exitCode=0 Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.388101 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" event={"ID":"78d8d828-48aa-4499-a4af-54e0dd754349","Type":"ContainerDied","Data":"f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda"} Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.392577 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerStarted","Data":"78d097af2e3e02f927967930e1a43b2841dac85f169b1676c899085efc7b7c4b"} Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.399538 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-22T07:12:22Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.412426 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed
014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\
\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:22Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.430864 4929 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:22Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.442725 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:22Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.460115 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:22Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.472629 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:22Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.482276 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:22Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.489450 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.489512 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.489522 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.489535 4929 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.489543 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:22Z","lastTransitionTime":"2025-11-22T07:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.497602 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:22Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.510902 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:22Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.523967 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:22Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.538199 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:22Z is after 2025-08-24T17:21:41Z" Nov 22 
07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.551694 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03ff3be1-6ace-40d0-81aa-da787e652545\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:22Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.571900 4929 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:22Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.587103 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:22Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.592185 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.592256 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.592273 4929 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.592291 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.592304 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:22Z","lastTransitionTime":"2025-11-22T07:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.602200 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:22Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.622465 4929 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b29374131
9ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:22Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.640245 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:22Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.658167 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:22Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.686822 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:22Z 
is after 2025-08-24T17:21:41Z" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.696116 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.696153 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.696161 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.696174 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.696182 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:22Z","lastTransitionTime":"2025-11-22T07:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.799544 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.799587 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.799595 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.799610 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.799621 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:22Z","lastTransitionTime":"2025-11-22T07:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.901732 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.901770 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.901780 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.901796 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.901805 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:22Z","lastTransitionTime":"2025-11-22T07:12:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.947164 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.947188 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:22 crc kubenswrapper[4929]: E1122 07:12:22.947277 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:22 crc kubenswrapper[4929]: I1122 07:12:22.947295 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:22 crc kubenswrapper[4929]: E1122 07:12:22.947350 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:22 crc kubenswrapper[4929]: E1122 07:12:22.947447 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.004292 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.004331 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.004344 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.004359 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.004369 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:23Z","lastTransitionTime":"2025-11-22T07:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.106798 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.106831 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.106843 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.106858 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.106867 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:23Z","lastTransitionTime":"2025-11-22T07:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.209540 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.209581 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.209594 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.209613 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.209629 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:23Z","lastTransitionTime":"2025-11-22T07:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.312193 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.312267 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.312280 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.312299 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.312311 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:23Z","lastTransitionTime":"2025-11-22T07:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.395329 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.395381 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.410521 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab
95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.414562 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.414592 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.414604 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.414622 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.414636 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:23Z","lastTransitionTime":"2025-11-22T07:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.425629 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.431158 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.441323 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.454111 4929 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.467128 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 
07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.478550 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03ff3be1-6ace-40d0-81aa-da787e652545\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.492467 4929 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.507303 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.517038 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.517085 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.517303 4929 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.517327 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.517341 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:23Z","lastTransitionTime":"2025-11-22T07:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.519731 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.532052 4929 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b29374131
9ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.542118 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.555754 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.575836 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78d097af2e3e02f927967930e1a43b2841dac85f
169b1676c899085efc7b7c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.586455 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.600038 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e04
5380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.620340 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.620403 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.620422 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.620445 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.620462 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:23Z","lastTransitionTime":"2025-11-22T07:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.625287 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.637604 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.651137 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.666296 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.679014 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\
\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.692755 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\
":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.714174 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646f
b68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.724789 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.724830 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.724843 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.724864 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.724877 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:23Z","lastTransitionTime":"2025-11-22T07:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.743076 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.779943 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.788354 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.788390 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.788401 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.788416 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.788428 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:23Z","lastTransitionTime":"2025-11-22T07:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:23 crc kubenswrapper[4929]: E1122 07:12:23.801649 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.802848 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.805388 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.805418 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.805430 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.805447 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.805460 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:23Z","lastTransitionTime":"2025-11-22T07:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:23 crc kubenswrapper[4929]: E1122 07:12:23.816251 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.819915 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.819973 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.819986 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.820012 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.820028 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:23Z","lastTransitionTime":"2025-11-22T07:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.824576 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-c
erts\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: E1122 07:12:23.833894 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"e
f0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.837673 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.839168 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.839196 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.839206 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.839245 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.839255 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:23Z","lastTransitionTime":"2025-11-22T07:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.850123 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: E1122 07:12:23.851065 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.855429 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.855482 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.855497 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.855522 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.855536 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:23Z","lastTransitionTime":"2025-11-22T07:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.864981 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: E1122 07:12:23.870355 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: E1122 07:12:23.870516 4929 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.872099 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.872126 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.872138 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.872153 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.872163 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:23Z","lastTransitionTime":"2025-11-22T07:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.878000 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\"
:true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.890913 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"03ff3be1-6ace-40d0-81aa-da787e652545\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.904904 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.920843 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.932949 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.946548 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:23 crc kubenswrapper[4929]: E1122 07:12:23.946651 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.949638 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"
name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.964404 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.975044 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.975086 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.975098 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.975117 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.975130 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:23Z","lastTransitionTime":"2025-11-22T07:12:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:23 crc kubenswrapper[4929]: I1122 07:12:23.981321 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:23Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.002957 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\
"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-sock
et\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78d097af2e3e02f927967930e1a43b2841dac85f169b1676c899085efc7b7c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mou
ntPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.019246 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.031342 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.041442 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.053031 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.064303 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 
07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.075457 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03ff3be1-6ace-40d0-81aa-da787e652545\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.077158 4929 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.077186 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.077196 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.077227 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.077246 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:24Z","lastTransitionTime":"2025-11-22T07:12:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.089644 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.101603 4929 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.112652 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.128145 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.140418 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.154081 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.173103 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78d097af2e3e02f927967930e1a43b2841dac85f
169b1676c899085efc7b7c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.179440 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.179491 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.179504 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.179524 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.179537 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:24Z","lastTransitionTime":"2025-11-22T07:12:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.184894 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.201303 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.226620 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.242973 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.261514 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.275806 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.281453 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.281485 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.281494 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.281527 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.281540 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:24Z","lastTransitionTime":"2025-11-22T07:12:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.384251 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.384295 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.384306 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.384322 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.384334 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:24Z","lastTransitionTime":"2025-11-22T07:12:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.401771 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" event={"ID":"78d8d828-48aa-4499-a4af-54e0dd754349","Type":"ContainerStarted","Data":"267921a54f8c6cd58f1c3e278a2efeafdf5f6d62128c1d0ae9f82578a6674e03"} Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.402184 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.428798 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.441339 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03ff3be1-6ace-40d0-81aa-da787e652545\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath
\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.457652 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.471411 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.482283 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.486464 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.486506 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.486517 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.486534 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.486548 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:24Z","lastTransitionTime":"2025-11-22T07:12:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.495338 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" 
for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.508567 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.518850 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.529454 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.548183 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78d097af2e3e02f927967930e1a43b2841dac85f
169b1676c899085efc7b7c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.569459 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.581640 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.588577 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.588610 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.588626 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.588642 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.588656 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:24Z","lastTransitionTime":"2025-11-22T07:12:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.595492 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.608350 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.617238 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\
\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.634553 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\
":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.645963 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\
":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.660254 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.670541 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.680296 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:24Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.690917 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.690942 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.690950 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.690961 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.690970 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:24Z","lastTransitionTime":"2025-11-22T07:12:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.793799 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.793886 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.793911 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.793942 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.793965 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:24Z","lastTransitionTime":"2025-11-22T07:12:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.895958 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.896037 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.896235 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.896255 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.896268 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:24Z","lastTransitionTime":"2025-11-22T07:12:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.947163 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:24 crc kubenswrapper[4929]: E1122 07:12:24.947329 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.947340 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.947417 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:24 crc kubenswrapper[4929]: E1122 07:12:24.947507 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:24 crc kubenswrapper[4929]: E1122 07:12:24.947551 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.997998 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.998037 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.998050 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.998067 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:24 crc kubenswrapper[4929]: I1122 07:12:24.998079 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:24Z","lastTransitionTime":"2025-11-22T07:12:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.100177 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.100495 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.100636 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.100751 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.100882 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:25Z","lastTransitionTime":"2025-11-22T07:12:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.204004 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.204291 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.204391 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.204496 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.204580 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:25Z","lastTransitionTime":"2025-11-22T07:12:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.306775 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.306810 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.306819 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.306832 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.306843 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:25Z","lastTransitionTime":"2025-11-22T07:12:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.409331 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.409402 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.409424 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.409453 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.409477 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:25Z","lastTransitionTime":"2025-11-22T07:12:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.426657 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:25Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.444305 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:25Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.455680 4929 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:25Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.480768 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:25Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.494467 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:25Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.511678 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.511736 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.511747 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.511762 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.511772 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:25Z","lastTransitionTime":"2025-11-22T07:12:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.512673 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa4
1ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:25Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.523575 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:25Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.534825 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:25Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.547038 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03ff3be1-6ace-40d0-81aa-da787e652545\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:25Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.558241 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller
\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:25Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.570842 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:25Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.591797 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78d097af2e3e02f927967930e1a43b2841dac85f
169b1676c899085efc7b7c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:25Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.605246 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:25Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.614729 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.614789 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.614802 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.614823 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.614836 4929 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:25Z","lastTransitionTime":"2025-11-22T07:12:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.618923 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:25Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.631093 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:25Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.643123 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:25Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.655629 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:25Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.671615 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://267921a54f8c6cd58f1c3e278a2efeafdf5f6d62128c1d0ae9f82578a6674e03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:25Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.691502 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:25Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.717291 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.717333 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.717348 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.717366 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.717378 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:25Z","lastTransitionTime":"2025-11-22T07:12:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.819878 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.819908 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.819917 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.819929 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.819938 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:25Z","lastTransitionTime":"2025-11-22T07:12:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.921987 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.922023 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.922031 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.922044 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.922054 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:25Z","lastTransitionTime":"2025-11-22T07:12:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:25 crc kubenswrapper[4929]: I1122 07:12:25.946808 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:25 crc kubenswrapper[4929]: E1122 07:12:25.946967 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.024023 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.024102 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.024125 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.024153 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.024174 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:26Z","lastTransitionTime":"2025-11-22T07:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.127332 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.127414 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.127433 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.127459 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.127483 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:26Z","lastTransitionTime":"2025-11-22T07:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.229904 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.229942 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.229953 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.229968 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.229977 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:26Z","lastTransitionTime":"2025-11-22T07:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.332725 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.332762 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.332770 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.332784 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.332793 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:26Z","lastTransitionTime":"2025-11-22T07:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.426502 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovnkube-controller" probeResult="failure" output="" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.435438 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.435477 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.435486 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.435502 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.435514 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:26Z","lastTransitionTime":"2025-11-22T07:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.537613 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.537680 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.537697 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.537721 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.537738 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:26Z","lastTransitionTime":"2025-11-22T07:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.639687 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.639740 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.639759 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.639780 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.639795 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:26Z","lastTransitionTime":"2025-11-22T07:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.742471 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.742533 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.742557 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.742585 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.742620 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:26Z","lastTransitionTime":"2025-11-22T07:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.845761 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.846024 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.846107 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.846193 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.846304 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:26Z","lastTransitionTime":"2025-11-22T07:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.947030 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.947102 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.947478 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:12:26 crc kubenswrapper[4929]: E1122 07:12:26.947725 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:12:26 crc kubenswrapper[4929]: E1122 07:12:26.947812 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:12:26 crc kubenswrapper[4929]: E1122 07:12:26.947897 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.948982 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.949034 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.949053 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.949076 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:26 crc kubenswrapper[4929]: I1122 07:12:26.949094 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:26Z","lastTransitionTime":"2025-11-22T07:12:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.051757 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.051825 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.051847 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.051882 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.051917 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:27Z","lastTransitionTime":"2025-11-22T07:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.155065 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.155134 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.155159 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.155188 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.155246 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:27Z","lastTransitionTime":"2025-11-22T07:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.258098 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.258164 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.258183 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.258204 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.258248 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:27Z","lastTransitionTime":"2025-11-22T07:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.360960 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.361363 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.361579 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.361725 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.361963 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:27Z","lastTransitionTime":"2025-11-22T07:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.464308 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.464375 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.464398 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.464421 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.464438 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:27Z","lastTransitionTime":"2025-11-22T07:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.567537 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.568133 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.568258 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.568360 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.568448 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:27Z","lastTransitionTime":"2025-11-22T07:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.671808 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.672279 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.672505 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.672746 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.672951 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:27Z","lastTransitionTime":"2025-11-22T07:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.776349 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.776399 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.776409 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.776432 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.776447 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:27Z","lastTransitionTime":"2025-11-22T07:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.878986 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.879236 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.879310 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.879379 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.879458 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:27Z","lastTransitionTime":"2025-11-22T07:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.946655 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:12:27 crc kubenswrapper[4929]: E1122 07:12:27.946999 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.987658 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.987724 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.987737 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.987755 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:27 crc kubenswrapper[4929]: I1122 07:12:27.987764 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:27Z","lastTransitionTime":"2025-11-22T07:12:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.090133 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.090194 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.090204 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.090236 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.090249 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:28Z","lastTransitionTime":"2025-11-22T07:12:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.192992 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.193082 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.193114 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.193149 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.193173 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:28Z","lastTransitionTime":"2025-11-22T07:12:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.295980 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.296033 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.296049 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.296067 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.296080 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:28Z","lastTransitionTime":"2025-11-22T07:12:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.399318 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.399815 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.399843 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.399885 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.399917 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:28Z","lastTransitionTime":"2025-11-22T07:12:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.502950 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.503040 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.503060 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.503103 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.503121 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:28Z","lastTransitionTime":"2025-11-22T07:12:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.605865 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.605904 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.605915 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.605931 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.605942 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:28Z","lastTransitionTime":"2025-11-22T07:12:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.708456 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.708504 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.708514 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.708533 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.708545 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:28Z","lastTransitionTime":"2025-11-22T07:12:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.814821 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.814888 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.814927 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.814960 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.814982 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:28Z","lastTransitionTime":"2025-11-22T07:12:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.917682 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.917722 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.917738 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.917754 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.917766 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:28Z","lastTransitionTime":"2025-11-22T07:12:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.946348 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.946390 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:12:28 crc kubenswrapper[4929]: E1122 07:12:28.946565 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:12:28 crc kubenswrapper[4929]: I1122 07:12:28.946385 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:28 crc kubenswrapper[4929]: E1122 07:12:28.946705 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:28 crc kubenswrapper[4929]: E1122 07:12:28.947146 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.020440 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.020504 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.020521 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.020545 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.020562 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:29Z","lastTransitionTime":"2025-11-22T07:12:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.122787 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.122821 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.122829 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.122843 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.122852 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:29Z","lastTransitionTime":"2025-11-22T07:12:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.225593 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.225659 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.225676 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.225700 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.225718 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:29Z","lastTransitionTime":"2025-11-22T07:12:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.328493 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.328545 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.328566 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.328595 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.328619 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:29Z","lastTransitionTime":"2025-11-22T07:12:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.431291 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.431384 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.431405 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.431431 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.431448 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:29Z","lastTransitionTime":"2025-11-22T07:12:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.534666 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.534714 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.534726 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.534743 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.534756 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:29Z","lastTransitionTime":"2025-11-22T07:12:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.637326 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.637400 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.637426 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.637452 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.637494 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:29Z","lastTransitionTime":"2025-11-22T07:12:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.740413 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.740442 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.740451 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.740466 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.740478 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:29Z","lastTransitionTime":"2025-11-22T07:12:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.842730 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.842770 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.842777 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.842789 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.842798 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:29Z","lastTransitionTime":"2025-11-22T07:12:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.945579 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.945649 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.945674 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.945699 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.945714 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:29Z","lastTransitionTime":"2025-11-22T07:12:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:29 crc kubenswrapper[4929]: I1122 07:12:29.946589 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:29 crc kubenswrapper[4929]: E1122 07:12:29.946719 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.048752 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.048795 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.048803 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.048816 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.048825 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:30Z","lastTransitionTime":"2025-11-22T07:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.151366 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.151418 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.151435 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.151459 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.151478 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:30Z","lastTransitionTime":"2025-11-22T07:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.253965 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.254020 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.254036 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.254060 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.254077 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:30Z","lastTransitionTime":"2025-11-22T07:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.357003 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.357067 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.357092 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.357121 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.357144 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:30Z","lastTransitionTime":"2025-11-22T07:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.422874 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vkn7m_77273e11-9bf5-4480-bf99-266ae3f1ed7a/ovnkube-controller/0.log" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.426520 4929 generic.go:334] "Generic (PLEG): container finished" podID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerID="78d097af2e3e02f927967930e1a43b2841dac85f169b1676c899085efc7b7c4b" exitCode=1 Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.426541 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerDied","Data":"78d097af2e3e02f927967930e1a43b2841dac85f169b1676c899085efc7b7c4b"} Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.427662 4929 scope.go:117] "RemoveContainer" containerID="78d097af2e3e02f927967930e1a43b2841dac85f169b1676c899085efc7b7c4b" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.448343 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":t
rue,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] 
Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:30Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.460567 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.460631 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.460656 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.460683 4929 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.460705 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:30Z","lastTransitionTime":"2025-11-22T07:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.471965 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",
\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:30Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.488845 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:30Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.515415 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78d097af2e3e02f927967930e1a43b2841dac85f
169b1676c899085efc7b7c4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78d097af2e3e02f927967930e1a43b2841dac85f169b1676c899085efc7b7c4b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T07:12:29Z\\\",\\\"message\\\":\\\"gressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 07:12:29.377095 6438 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 07:12:29.377200 6438 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 07:12:29.377297 6438 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:29.377884 6438 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 07:12:29.377917 6438 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1122 07:12:29.377928 6438 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1122 07:12:29.377954 6438 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 07:12:29.378002 6438 factory.go:656] Stopping watch factory\\\\nI1122 07:12:29.378012 6438 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 07:12:29.378022 6438 ovnkube.go:599] Stopped ovnkube\\\\nI1122 07:12:29.378037 6438 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 07:12:29.378055 6438 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 
07\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:30Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.534490 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://267921a54f8c6cd58f1c3e278a2efeafdf5f6d62128c1d0ae9f82578a6674e03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containe
rID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeM
ounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:30Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.559928 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshi
ft-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb6
8e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:30Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.563783 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.563842 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.563860 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.563884 4929 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.563904 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:30Z","lastTransitionTime":"2025-11-22T07:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.572712 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:30Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.586513 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:30Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.600088 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:30Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.611756 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:30Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.632798 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:30Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.651659 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:30Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.666747 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.666788 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.666806 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.666828 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.666846 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:30Z","lastTransitionTime":"2025-11-22T07:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.666937 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:30Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.687452 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:30Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.703456 4929 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03ff3be1-6ace-40d0-81aa-da787e652545\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:30Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.721435 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed 
to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:30Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.737392 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:30Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.751550 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:30Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.768649 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:30Z is after 2025-08-24T17:21:41Z" Nov 22 
07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.769778 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.769824 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.769841 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.769863 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.769880 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:30Z","lastTransitionTime":"2025-11-22T07:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.872464 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.872528 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.872551 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.872581 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.872606 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:30Z","lastTransitionTime":"2025-11-22T07:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.946878 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.946954 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.946971 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:12:30 crc kubenswrapper[4929]: E1122 07:12:30.947126 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:12:30 crc kubenswrapper[4929]: E1122 07:12:30.947396 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:12:30 crc kubenswrapper[4929]: E1122 07:12:30.947461 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.976599 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.976676 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.976694 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.976771 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:30 crc kubenswrapper[4929]: I1122 07:12:30.976791 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:30Z","lastTransitionTime":"2025-11-22T07:12:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.079474 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.079502 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.079511 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.079524 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.079533 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:31Z","lastTransitionTime":"2025-11-22T07:12:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.182488 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.182782 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.182794 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.182810 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.182822 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:31Z","lastTransitionTime":"2025-11-22T07:12:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.284876 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.284911 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.284921 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.284934 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.284944 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:31Z","lastTransitionTime":"2025-11-22T07:12:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.387040 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.387089 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.387101 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.387120 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.387134 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:31Z","lastTransitionTime":"2025-11-22T07:12:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.431170 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vkn7m_77273e11-9bf5-4480-bf99-266ae3f1ed7a/ovnkube-controller/0.log" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.435030 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerStarted","Data":"5ee33a987016f40b3dcd310ff551ca2cd43c6be08b14f149194ed36beb11c4aa"} Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.435406 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.453687 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:31Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.467762 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:31Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.489038 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:31Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.489625 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.489653 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.489664 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.489680 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.489691 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:31Z","lastTransitionTime":"2025-11-22T07:12:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.504229 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:31Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.518881 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:31Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.531917 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:31Z is after 2025-08-24T17:21:41Z" Nov 22 
07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.549759 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03ff3be1-6ace-40d0-81aa-da787e652545\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:31Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.563398 4929 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:31Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.577841 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:31Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.591638 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.591671 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.591681 4929 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.591695 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.591705 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:31Z","lastTransitionTime":"2025-11-22T07:12:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.598664 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ee33a987016f40b3dcd310ff551ca2cd43c6be0
8b14f149194ed36beb11c4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78d097af2e3e02f927967930e1a43b2841dac85f169b1676c899085efc7b7c4b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T07:12:29Z\\\",\\\"message\\\":\\\"gressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 07:12:29.377095 6438 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 07:12:29.377200 6438 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 07:12:29.377297 6438 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:29.377884 6438 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 07:12:29.377917 6438 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1122 07:12:29.377928 6438 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1122 07:12:29.377954 6438 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 07:12:29.378002 6438 factory.go:656] Stopping watch factory\\\\nI1122 07:12:29.378012 6438 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 07:12:29.378022 6438 ovnkube.go:599] Stopped ovnkube\\\\nI1122 07:12:29.378037 6438 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 07:12:29.378055 6438 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 
07\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:31Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.614947 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:31Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.625492 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:31Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.638280 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:31Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.651461 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:31Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.663267 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:31Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.680014 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://267921a54f8c6cd58f1c3e278a2efeafdf5f6d62128c1d0ae9f82578a6674e03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\
\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\
\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:31Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.693970 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.694026 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.694176 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.694231 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.694253 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:31Z","lastTransitionTime":"2025-11-22T07:12:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.707062 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe
403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:31Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.718361 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:31Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.732631 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:31Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.796905 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.796973 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.796990 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.797014 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.797033 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:31Z","lastTransitionTime":"2025-11-22T07:12:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.899790 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.899854 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.899872 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.899899 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.899919 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:31Z","lastTransitionTime":"2025-11-22T07:12:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:31 crc kubenswrapper[4929]: I1122 07:12:31.946672 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:31 crc kubenswrapper[4929]: E1122 07:12:31.946873 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.002334 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.002402 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.002422 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.002446 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.002465 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:32Z","lastTransitionTime":"2025-11-22T07:12:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.105413 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.105477 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.105500 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.105527 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.105547 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:32Z","lastTransitionTime":"2025-11-22T07:12:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.207992 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.208049 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.208066 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.208088 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.208105 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:32Z","lastTransitionTime":"2025-11-22T07:12:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.310757 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.310825 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.310844 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.310867 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.310886 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:32Z","lastTransitionTime":"2025-11-22T07:12:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.413458 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.413523 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.413541 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.413563 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.413579 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:32Z","lastTransitionTime":"2025-11-22T07:12:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.440849 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vkn7m_77273e11-9bf5-4480-bf99-266ae3f1ed7a/ovnkube-controller/1.log" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.442255 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vkn7m_77273e11-9bf5-4480-bf99-266ae3f1ed7a/ovnkube-controller/0.log" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.446962 4929 generic.go:334] "Generic (PLEG): container finished" podID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerID="5ee33a987016f40b3dcd310ff551ca2cd43c6be08b14f149194ed36beb11c4aa" exitCode=1 Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.447031 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerDied","Data":"5ee33a987016f40b3dcd310ff551ca2cd43c6be08b14f149194ed36beb11c4aa"} Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.447084 4929 scope.go:117] "RemoveContainer" containerID="78d097af2e3e02f927967930e1a43b2841dac85f169b1676c899085efc7b7c4b" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.448173 4929 scope.go:117] "RemoveContainer" containerID="5ee33a987016f40b3dcd310ff551ca2cd43c6be08b14f149194ed36beb11c4aa" Nov 22 07:12:32 crc kubenswrapper[4929]: E1122 07:12:32.448474 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-vkn7m_openshift-ovn-kubernetes(77273e11-9bf5-4480-bf99-266ae3f1ed7a)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.467383 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:32Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.483363 4929 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:32Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.502481 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:32Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.516117 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.516179 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.516196 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.516253 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.516271 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:32Z","lastTransitionTime":"2025-11-22T07:12:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.522499 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:32Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.543043 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:32Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.559790 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:32Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.577723 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:32Z is after 2025-08-24T17:21:41Z" Nov 22 
07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.594030 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03ff3be1-6ace-40d0-81aa-da787e652545\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:32Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.616297 4929 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:32Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.619294 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.619344 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.619361 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.619383 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.619402 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:32Z","lastTransitionTime":"2025-11-22T07:12:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.634600 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:32Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.658817 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ee33a987016f40b3dcd310ff551ca2cd43c6be08b14f149194ed36beb11c4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://78d097af2e3e02f927967930e1a43b2841dac85f169b1676c899085efc7b7c4b\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T07:12:29Z\\\",\\\"message\\\":\\\"gressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 07:12:29.377095 6438 reflector.go:311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 07:12:29.377200 6438 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 07:12:29.377297 6438 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:29.377884 6438 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 07:12:29.377917 6438 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1122 07:12:29.377928 6438 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1122 07:12:29.377954 6438 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 07:12:29.378002 6438 factory.go:656] Stopping watch factory\\\\nI1122 07:12:29.378012 6438 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 07:12:29.378022 6438 ovnkube.go:599] Stopped ovnkube\\\\nI1122 07:12:29.378037 6438 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 07:12:29.378055 6438 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 07\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ee33a987016f40b3dcd310ff551ca2cd43c6be08b14f149194ed36beb11c4aa\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T07:12:32Z\\\",\\\"message\\\":\\\"flector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979365 6637 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979421 6637 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979561 6637 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 07:12:31.979759 6637 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979846 6637 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979934 6637 reflector.go:311] Stopping reflector *v1.Node (0s) from 
k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.980134 6637 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 07:12:31.980484 6637 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuberne
tes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:32Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.679690 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:32Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.698406 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:32Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.718251 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:32Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.722085 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.722171 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.722189 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.722255 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.722274 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:32Z","lastTransitionTime":"2025-11-22T07:12:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.737020 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:32Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.750077 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:32Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.766276 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://267921a54f8c6cd58f1c3e278a2efeafdf5f6d62128c1d0ae9f82578a6674e03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:32Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.796956 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:32Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.816153 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:32Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.825632 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.825677 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.825686 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.825700 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.825710 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:32Z","lastTransitionTime":"2025-11-22T07:12:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.929136 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.929203 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.929260 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.929291 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.929313 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:32Z","lastTransitionTime":"2025-11-22T07:12:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.946494 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.946644 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:32 crc kubenswrapper[4929]: E1122 07:12:32.946650 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:32 crc kubenswrapper[4929]: E1122 07:12:32.946831 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:32 crc kubenswrapper[4929]: I1122 07:12:32.946495 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:32 crc kubenswrapper[4929]: E1122 07:12:32.947047 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.031653 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.031743 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.031774 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.031804 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.031826 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:33Z","lastTransitionTime":"2025-11-22T07:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.135113 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.135364 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.135458 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.135518 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.135580 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:33Z","lastTransitionTime":"2025-11-22T07:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.238498 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.238551 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.238567 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.238590 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.238607 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:33Z","lastTransitionTime":"2025-11-22T07:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.341507 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.341565 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.341585 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.341602 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.341617 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:33Z","lastTransitionTime":"2025-11-22T07:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.443747 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.444046 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.444261 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.444454 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.444616 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:33Z","lastTransitionTime":"2025-11-22T07:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.452965 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vkn7m_77273e11-9bf5-4480-bf99-266ae3f1ed7a/ovnkube-controller/1.log" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.457719 4929 scope.go:117] "RemoveContainer" containerID="5ee33a987016f40b3dcd310ff551ca2cd43c6be08b14f149194ed36beb11c4aa" Nov 22 07:12:33 crc kubenswrapper[4929]: E1122 07:12:33.457932 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-vkn7m_openshift-ovn-kubernetes(77273e11-9bf5-4480-bf99-266ae3f1ed7a)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.474344 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.488908 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.501710 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.513106 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\
\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.528473 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://267921a54f8c6cd58f1c3e278a2efeafdf5f6d62128c1d0ae9f82578a6674e03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.547093 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.547130 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.547141 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.547158 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.547169 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:33Z","lastTransitionTime":"2025-11-22T07:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.548510 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf
4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.563609 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.579895 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.594184 4929 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.610721 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.627112 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.639026 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.649842 4929 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.649909 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.649932 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.649959 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.649985 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:33Z","lastTransitionTime":"2025-11-22T07:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.653067 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.667042 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.676825 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03ff3be1-6ace-40d0-81aa-da787e652545\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.688689 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller
\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.702607 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.726317 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ee33a987016f40b3dcd310ff551ca2cd43c6be0
8b14f149194ed36beb11c4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ee33a987016f40b3dcd310ff551ca2cd43c6be08b14f149194ed36beb11c4aa\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T07:12:32Z\\\",\\\"message\\\":\\\"flector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979365 6637 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979421 6637 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979561 6637 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 07:12:31.979759 6637 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979846 6637 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979934 6637 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.980134 6637 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 07:12:31.980484 6637 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vkn7m_openshift-ovn-kubernetes(77273e11-9bf5-4480-bf99-266ae3f1ed7a)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.745637 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.752459 4929 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.752531 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.752552 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.752581 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.752603 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:33Z","lastTransitionTime":"2025-11-22T07:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.855250 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.855330 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.855354 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.855384 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.855406 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:33Z","lastTransitionTime":"2025-11-22T07:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.947264 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:33 crc kubenswrapper[4929]: E1122 07:12:33.947618 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.957749 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.957809 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.957826 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.957852 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.957870 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:33Z","lastTransitionTime":"2025-11-22T07:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.964054 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is 
after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.966271 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.966303 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.966333 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.966346 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.966356 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:33Z","lastTransitionTime":"2025-11-22T07:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:33 crc kubenswrapper[4929]: E1122 07:12:33.979846 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.981572 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.984010 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.984076 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.984094 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.984119 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:33 crc kubenswrapper[4929]: I1122 07:12:33.984137 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:33Z","lastTransitionTime":"2025-11-22T07:12:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.002687 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:33Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc kubenswrapper[4929]: E1122 07:12:34.006253 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.011624 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.011697 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.011723 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.011756 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.011824 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:34Z","lastTransitionTime":"2025-11-22T07:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.016762 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc 
kubenswrapper[4929]: E1122 07:12:34.032963 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider 
started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshif
t-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d
34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.036000 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://267921a54f8c6cd58f1c3e278a2efeafdf5f6d62128c1d0ae9f82578a6674e03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.037098 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.037257 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:34 crc 
kubenswrapper[4929]: I1122 07:12:34.037286 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.037312 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.037329 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:34Z","lastTransitionTime":"2025-11-22T07:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:34 crc kubenswrapper[4929]: E1122 07:12:34.051073 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.055750 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.055869 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.055930 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.055998 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.056063 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:34Z","lastTransitionTime":"2025-11-22T07:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.060007 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resourc
es\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Complet
ed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.074279 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc kubenswrapper[4929]: E1122 07:12:34.074244 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc kubenswrapper[4929]: E1122 07:12:34.074388 4929 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.076409 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.076558 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.076969 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.077227 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.077787 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:34Z","lastTransitionTime":"2025-11-22T07:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.086377 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea1
77225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.098065 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.110384 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.115872 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs\") pod \"network-metrics-daemon-vmdgb\" (UID: \"42cb9248-6b5b-4970-8232-68883ec65710\") " pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:34 crc kubenswrapper[4929]: E1122 07:12:34.116020 4929 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 07:12:34 crc kubenswrapper[4929]: E1122 07:12:34.116116 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs podName:42cb9248-6b5b-4970-8232-68883ec65710 nodeName:}" failed. No retries permitted until 2025-11-22 07:13:06.116093077 +0000 UTC m=+123.225547100 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs") pod "network-metrics-daemon-vmdgb" (UID: "42cb9248-6b5b-4970-8232-68883ec65710") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.122496 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.134936 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.144497 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.153931 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 
07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.167092 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03ff3be1-6ace-40d0-81aa-da787e652545\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.179950 4929 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.180005 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.180013 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.180026 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.180035 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:34Z","lastTransitionTime":"2025-11-22T07:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.184627 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T0
7:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.203116 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.225481 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ee33a987016f40b3dcd310ff551ca2cd43c6be0
8b14f149194ed36beb11c4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ee33a987016f40b3dcd310ff551ca2cd43c6be08b14f149194ed36beb11c4aa\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T07:12:32Z\\\",\\\"message\\\":\\\"flector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979365 6637 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979421 6637 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979561 6637 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 07:12:31.979759 6637 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979846 6637 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979934 6637 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.980134 6637 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 07:12:31.980484 6637 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vkn7m_openshift-ovn-kubernetes(77273e11-9bf5-4480-bf99-266ae3f1ed7a)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.246085 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:34Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.282762 4929 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.282829 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.282855 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.282887 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.282911 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:34Z","lastTransitionTime":"2025-11-22T07:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.385545 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.385591 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.385604 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.385626 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.385641 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:34Z","lastTransitionTime":"2025-11-22T07:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.487722 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.487850 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.487884 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.487989 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.488062 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:34Z","lastTransitionTime":"2025-11-22T07:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.590644 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.590697 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.590707 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.590721 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.590730 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:34Z","lastTransitionTime":"2025-11-22T07:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.692578 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.692614 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.692630 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.692647 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.692659 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:34Z","lastTransitionTime":"2025-11-22T07:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.795365 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.795435 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.795456 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.795484 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.795505 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:34Z","lastTransitionTime":"2025-11-22T07:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.898429 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.898502 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.898525 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.898557 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.898580 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:34Z","lastTransitionTime":"2025-11-22T07:12:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.946656 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.946723 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:34 crc kubenswrapper[4929]: I1122 07:12:34.946671 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:34 crc kubenswrapper[4929]: E1122 07:12:34.946832 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:34 crc kubenswrapper[4929]: E1122 07:12:34.947021 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:34 crc kubenswrapper[4929]: E1122 07:12:34.947140 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.001447 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.001541 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.001565 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.001599 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.001622 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:35Z","lastTransitionTime":"2025-11-22T07:12:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.104614 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.104672 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.104691 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.104715 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.104733 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:35Z","lastTransitionTime":"2025-11-22T07:12:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.207184 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.207249 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.207261 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.207276 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.207289 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:35Z","lastTransitionTime":"2025-11-22T07:12:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.310613 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.310692 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.310720 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.310751 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.310775 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:35Z","lastTransitionTime":"2025-11-22T07:12:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.414058 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.414106 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.414117 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.414130 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.414139 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:35Z","lastTransitionTime":"2025-11-22T07:12:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.517266 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.517362 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.517388 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.517425 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.517448 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:35Z","lastTransitionTime":"2025-11-22T07:12:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.620615 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.620691 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.620712 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.620739 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.620757 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:35Z","lastTransitionTime":"2025-11-22T07:12:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.723155 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.723200 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.723233 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.723251 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.723262 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:35Z","lastTransitionTime":"2025-11-22T07:12:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.826199 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.826292 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.826312 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.826337 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.826357 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:35Z","lastTransitionTime":"2025-11-22T07:12:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.928873 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.928922 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.928934 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.928949 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.928962 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:35Z","lastTransitionTime":"2025-11-22T07:12:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:35 crc kubenswrapper[4929]: I1122 07:12:35.946680 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:35 crc kubenswrapper[4929]: E1122 07:12:35.946835 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.032110 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.032158 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.032171 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.032191 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.032254 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:36Z","lastTransitionTime":"2025-11-22T07:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.134908 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.134976 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.134999 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.135024 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.135043 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:36Z","lastTransitionTime":"2025-11-22T07:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.237295 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.237355 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.237377 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.237402 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.237422 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:36Z","lastTransitionTime":"2025-11-22T07:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.339438 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.339481 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.339496 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.339516 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.339532 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:36Z","lastTransitionTime":"2025-11-22T07:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.442752 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.442824 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.442841 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.442866 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.442886 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:36Z","lastTransitionTime":"2025-11-22T07:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.545585 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.545643 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.545681 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.545713 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.545736 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:36Z","lastTransitionTime":"2025-11-22T07:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.648384 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.648448 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.648466 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.648492 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.648511 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:36Z","lastTransitionTime":"2025-11-22T07:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.750736 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.750778 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.750791 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.750804 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.750812 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:36Z","lastTransitionTime":"2025-11-22T07:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.853532 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.853577 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.853589 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.853604 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.853615 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:36Z","lastTransitionTime":"2025-11-22T07:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.946346 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.946431 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:36 crc kubenswrapper[4929]: E1122 07:12:36.946533 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.946346 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:36 crc kubenswrapper[4929]: E1122 07:12:36.946683 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:36 crc kubenswrapper[4929]: E1122 07:12:36.946856 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.957358 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.957426 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.957451 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.957478 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:36 crc kubenswrapper[4929]: I1122 07:12:36.957500 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:36Z","lastTransitionTime":"2025-11-22T07:12:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.060818 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.060866 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.060877 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.060895 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.060908 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:37Z","lastTransitionTime":"2025-11-22T07:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.163018 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.163057 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.163065 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.163078 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.163088 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:37Z","lastTransitionTime":"2025-11-22T07:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.265684 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.265723 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.265735 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.265750 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.265761 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:37Z","lastTransitionTime":"2025-11-22T07:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.368202 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.368291 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.368312 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.368335 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.368353 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:37Z","lastTransitionTime":"2025-11-22T07:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.470177 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.470413 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.470500 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.470605 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.470679 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:37Z","lastTransitionTime":"2025-11-22T07:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.574141 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.574474 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.574723 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.574845 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.574968 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:37Z","lastTransitionTime":"2025-11-22T07:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.677954 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.677989 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.678003 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.678024 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.678039 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:37Z","lastTransitionTime":"2025-11-22T07:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.780612 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.781015 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.781159 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.781338 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.781471 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:37Z","lastTransitionTime":"2025-11-22T07:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.884810 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.884880 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.884895 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.884917 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.884931 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:37Z","lastTransitionTime":"2025-11-22T07:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.947389 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:37 crc kubenswrapper[4929]: E1122 07:12:37.947573 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.986471 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.986510 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.986521 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.986536 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:37 crc kubenswrapper[4929]: I1122 07:12:37.986547 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:37Z","lastTransitionTime":"2025-11-22T07:12:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.089295 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.089346 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.089357 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.089374 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.089386 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:38Z","lastTransitionTime":"2025-11-22T07:12:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.192288 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.192351 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.192368 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.192393 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.192414 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:38Z","lastTransitionTime":"2025-11-22T07:12:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.295026 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.295084 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.295102 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.295129 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.295201 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:38Z","lastTransitionTime":"2025-11-22T07:12:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.398034 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.398127 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.398152 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.398189 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.398272 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:38Z","lastTransitionTime":"2025-11-22T07:12:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.501368 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.501408 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.501421 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.501440 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.501452 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:38Z","lastTransitionTime":"2025-11-22T07:12:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.604980 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.605052 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.605070 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.605094 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.605116 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:38Z","lastTransitionTime":"2025-11-22T07:12:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.708486 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.708571 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.708633 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.708658 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.709271 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:38Z","lastTransitionTime":"2025-11-22T07:12:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.811383 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.811486 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.811505 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.811529 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.811546 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:38Z","lastTransitionTime":"2025-11-22T07:12:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.913606 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.913674 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.913692 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.913718 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.913737 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:38Z","lastTransitionTime":"2025-11-22T07:12:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.946939 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.947008 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:38 crc kubenswrapper[4929]: I1122 07:12:38.947014 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:38 crc kubenswrapper[4929]: E1122 07:12:38.947097 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:38 crc kubenswrapper[4929]: E1122 07:12:38.947254 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:12:38 crc kubenswrapper[4929]: E1122 07:12:38.947539 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.016882 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.016934 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.016956 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.016979 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.016997 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:39Z","lastTransitionTime":"2025-11-22T07:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.120173 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.120261 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.120281 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.120301 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.120315 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:39Z","lastTransitionTime":"2025-11-22T07:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.223462 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.223494 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.223504 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.223517 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.223526 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:39Z","lastTransitionTime":"2025-11-22T07:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.326603 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.326666 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.326683 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.326711 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.326726 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:39Z","lastTransitionTime":"2025-11-22T07:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.429974 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.430048 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.430076 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.430101 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.430118 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:39Z","lastTransitionTime":"2025-11-22T07:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.533624 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.533690 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.533716 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.533748 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.533765 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:39Z","lastTransitionTime":"2025-11-22T07:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.637013 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.637072 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.637089 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.637112 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.637129 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:39Z","lastTransitionTime":"2025-11-22T07:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.740453 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.740547 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.740567 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.740620 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.740637 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:39Z","lastTransitionTime":"2025-11-22T07:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.843811 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.843900 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.843951 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.843973 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.843989 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:39Z","lastTransitionTime":"2025-11-22T07:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.946451 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:39 crc kubenswrapper[4929]: E1122 07:12:39.946761 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.947357 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.947435 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.947462 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.947495 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:39 crc kubenswrapper[4929]: I1122 07:12:39.947523 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:39Z","lastTransitionTime":"2025-11-22T07:12:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.050981 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.051159 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.051191 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.051249 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.051276 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:40Z","lastTransitionTime":"2025-11-22T07:12:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.154041 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.154117 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.154141 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.154197 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.154277 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:40Z","lastTransitionTime":"2025-11-22T07:12:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.257105 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.257168 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.257186 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.257239 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.257259 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:40Z","lastTransitionTime":"2025-11-22T07:12:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.360470 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.360608 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.360647 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.360683 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.360704 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:40Z","lastTransitionTime":"2025-11-22T07:12:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.464080 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.464149 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.464168 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.464196 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.464259 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:40Z","lastTransitionTime":"2025-11-22T07:12:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.567704 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.567764 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.567782 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.567805 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.567823 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:40Z","lastTransitionTime":"2025-11-22T07:12:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.670596 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.670735 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.670758 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.670782 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.670800 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:40Z","lastTransitionTime":"2025-11-22T07:12:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.774431 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.774505 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.774522 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.774548 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.774566 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:40Z","lastTransitionTime":"2025-11-22T07:12:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.877507 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.877573 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.877583 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.877601 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.877611 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:40Z","lastTransitionTime":"2025-11-22T07:12:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.946800 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.946837 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.946980 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:12:40 crc kubenswrapper[4929]: E1122 07:12:40.947058 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:12:40 crc kubenswrapper[4929]: E1122 07:12:40.947350 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:12:40 crc kubenswrapper[4929]: E1122 07:12:40.947446 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.980034 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.980091 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.980108 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.980132 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:40 crc kubenswrapper[4929]: I1122 07:12:40.980152 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:40Z","lastTransitionTime":"2025-11-22T07:12:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.083005 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.083066 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.083083 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.083106 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.083124 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:41Z","lastTransitionTime":"2025-11-22T07:12:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.186317 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.186381 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.186405 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.186435 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.186461 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:41Z","lastTransitionTime":"2025-11-22T07:12:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.288439 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.288485 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.288496 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.288511 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.288522 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:41Z","lastTransitionTime":"2025-11-22T07:12:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.390529 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.390573 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.390586 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.390601 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.390612 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:41Z","lastTransitionTime":"2025-11-22T07:12:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.492935 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.492978 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.492989 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.493005 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.493014 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:41Z","lastTransitionTime":"2025-11-22T07:12:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.596071 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.596138 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.596165 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.596200 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.596264 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:41Z","lastTransitionTime":"2025-11-22T07:12:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.699683 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.699759 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.699783 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.699808 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.699827 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:41Z","lastTransitionTime":"2025-11-22T07:12:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.803655 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.803934 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.803956 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.803986 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.804013 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:41Z","lastTransitionTime":"2025-11-22T07:12:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.906987 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.907042 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.907055 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.907077 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.907091 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:41Z","lastTransitionTime":"2025-11-22T07:12:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:41 crc kubenswrapper[4929]: I1122 07:12:41.946697 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:41 crc kubenswrapper[4929]: E1122 07:12:41.946949 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.010020 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.010089 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.010109 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.010134 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.010152 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:42Z","lastTransitionTime":"2025-11-22T07:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.112675 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.112711 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.112719 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.112731 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.112741 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:42Z","lastTransitionTime":"2025-11-22T07:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.215028 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.215082 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.215090 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.215104 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.215113 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:42Z","lastTransitionTime":"2025-11-22T07:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.318244 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.318286 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.318298 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.318314 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.318325 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:42Z","lastTransitionTime":"2025-11-22T07:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.421509 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.421547 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.421558 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.421574 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.421585 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:42Z","lastTransitionTime":"2025-11-22T07:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.524659 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.524722 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.524738 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.524762 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.524781 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:42Z","lastTransitionTime":"2025-11-22T07:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.627488 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.627540 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.627561 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.627588 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.627605 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:42Z","lastTransitionTime":"2025-11-22T07:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.731396 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.731487 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.731515 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.731547 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.731582 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:42Z","lastTransitionTime":"2025-11-22T07:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.834819 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.835484 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.835576 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.835702 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.835842 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:42Z","lastTransitionTime":"2025-11-22T07:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.938651 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.939042 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.939201 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.939450 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.939604 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:42Z","lastTransitionTime":"2025-11-22T07:12:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.946968 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.947032 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:42 crc kubenswrapper[4929]: I1122 07:12:42.947312 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:42 crc kubenswrapper[4929]: E1122 07:12:42.947430 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:42 crc kubenswrapper[4929]: E1122 07:12:42.947611 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:12:42 crc kubenswrapper[4929]: E1122 07:12:42.947863 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.043546 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.043814 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.043902 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.043982 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.044094 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:43Z","lastTransitionTime":"2025-11-22T07:12:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.146433 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.146467 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.146480 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.146497 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.146508 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:43Z","lastTransitionTime":"2025-11-22T07:12:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.249647 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.249712 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.249727 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.249745 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.249757 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:43Z","lastTransitionTime":"2025-11-22T07:12:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.352862 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.352928 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.352941 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.352963 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.352988 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:43Z","lastTransitionTime":"2025-11-22T07:12:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.455684 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.455743 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.455806 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.455836 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.455858 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:43Z","lastTransitionTime":"2025-11-22T07:12:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.558915 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.558963 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.558973 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.558987 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.558996 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:43Z","lastTransitionTime":"2025-11-22T07:12:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.662536 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.662618 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.662643 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.662673 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.662694 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:43Z","lastTransitionTime":"2025-11-22T07:12:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.765105 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.765163 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.765183 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.765237 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.765256 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:43Z","lastTransitionTime":"2025-11-22T07:12:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.868181 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.868280 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.868299 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.868323 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.868342 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:43Z","lastTransitionTime":"2025-11-22T07:12:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.946901 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:43 crc kubenswrapper[4929]: E1122 07:12:43.947059 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.964930 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.970613 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.970667 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.970684 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.970710 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.970732 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:43Z","lastTransitionTime":"2025-11-22T07:12:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:43 crc kubenswrapper[4929]: I1122 07:12:43.998092 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5ee33a987016f40b3dcd310ff551ca2cd43c6be08b14f149194ed36beb11c4aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ee33a987016f40b3dcd310ff551ca2cd43c6be08b14f149194ed36beb11c4aa\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T07:12:32Z\\\",\\\"message\\\":\\\"flector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979365 6637 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979421 6637 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979561 6637 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 07:12:31.979759 6637 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979846 6637 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979934 6637 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.980134 6637 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 07:12:31.980484 6637 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vkn7m_openshift-ovn-kubernetes(77273e11-9bf5-4480-bf99-266ae3f1ed7a)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:43Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.019562 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.036383 4929 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\
\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.050006 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.068934 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.073292 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.073346 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.073364 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.073386 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.073401 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:44Z","lastTransitionTime":"2025-11-22T07:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.081351 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.100032 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://267921a54f8c6cd58f1c3e278a2efeafdf5f6d62128c1d0ae9f82578a6674e03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.125521 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.142185 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.158363 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.176020 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.176060 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.176070 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.176085 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.176098 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:44Z","lastTransitionTime":"2025-11-22T07:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.176844 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.196623 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.218281 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.241726 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.266616 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 
crc kubenswrapper[4929]: I1122 07:12:44.279726 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.279793 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.279811 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.279836 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.279853 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:44Z","lastTransitionTime":"2025-11-22T07:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.285057 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa4
1ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.301205 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.301263 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.301275 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.301294 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.301306 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:44Z","lastTransitionTime":"2025-11-22T07:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.301671 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03ff3be1-6ace-40d0-81aa-da787e652545\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: E1122 07:12:44.315717 4929 
kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/red
hat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987
117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba
717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.320688 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.320911 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.321049 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.321239 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.321395 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:44Z","lastTransitionTime":"2025-11-22T07:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.322684 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: E1122 07:12:44.338999 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"e
f0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.343980 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.344114 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.344188 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.344282 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.344359 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:44Z","lastTransitionTime":"2025-11-22T07:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:44 crc kubenswrapper[4929]: E1122 07:12:44.360364 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.365182 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.365260 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.365278 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.365303 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.365323 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:44Z","lastTransitionTime":"2025-11-22T07:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:44 crc kubenswrapper[4929]: E1122 07:12:44.381343 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.387402 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.387454 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.387471 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.387494 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.387516 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:44Z","lastTransitionTime":"2025-11-22T07:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:44 crc kubenswrapper[4929]: E1122 07:12:44.403857 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:44Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:44 crc kubenswrapper[4929]: E1122 07:12:44.404082 4929 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.405815 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.405927 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.405992 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.406063 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.406130 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:44Z","lastTransitionTime":"2025-11-22T07:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.509118 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.509182 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.509194 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.509226 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.509240 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:44Z","lastTransitionTime":"2025-11-22T07:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.611805 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.612136 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.612149 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.612166 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.612180 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:44Z","lastTransitionTime":"2025-11-22T07:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.715514 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.715572 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.715594 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.715619 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.715637 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:44Z","lastTransitionTime":"2025-11-22T07:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.818362 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.818423 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.818441 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.818467 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.818484 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:44Z","lastTransitionTime":"2025-11-22T07:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.921428 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.921472 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.921481 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.921494 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.921503 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:44Z","lastTransitionTime":"2025-11-22T07:12:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.946440 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.946537 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:44 crc kubenswrapper[4929]: E1122 07:12:44.946610 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:44 crc kubenswrapper[4929]: I1122 07:12:44.946634 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:44 crc kubenswrapper[4929]: E1122 07:12:44.946851 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:44 crc kubenswrapper[4929]: E1122 07:12:44.947056 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.023887 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.023965 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.023984 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.024009 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.024025 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:45Z","lastTransitionTime":"2025-11-22T07:12:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.126011 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.126051 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.126060 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.126074 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.126085 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:45Z","lastTransitionTime":"2025-11-22T07:12:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.229125 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.229193 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.229248 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.229294 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.229319 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:45Z","lastTransitionTime":"2025-11-22T07:12:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.332530 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.332586 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.332603 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.332627 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.332646 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:45Z","lastTransitionTime":"2025-11-22T07:12:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.435600 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.435687 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.435707 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.435765 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.435785 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:45Z","lastTransitionTime":"2025-11-22T07:12:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.538414 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.538473 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.538492 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.538514 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.538531 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:45Z","lastTransitionTime":"2025-11-22T07:12:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.642065 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.642109 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.642118 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.642133 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.642144 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:45Z","lastTransitionTime":"2025-11-22T07:12:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.744790 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.744842 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.744851 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.744865 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.744875 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:45Z","lastTransitionTime":"2025-11-22T07:12:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.847176 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.847267 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.847284 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.847307 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.847328 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:45Z","lastTransitionTime":"2025-11-22T07:12:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.947500 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:45 crc kubenswrapper[4929]: E1122 07:12:45.949335 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.949543 4929 scope.go:117] "RemoveContainer" containerID="5ee33a987016f40b3dcd310ff551ca2cd43c6be08b14f149194ed36beb11c4aa" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.950569 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.950604 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.950613 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.950627 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:45 crc kubenswrapper[4929]: I1122 07:12:45.950637 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:45Z","lastTransitionTime":"2025-11-22T07:12:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.053388 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.053432 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.053441 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.053457 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.053467 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:46Z","lastTransitionTime":"2025-11-22T07:12:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.156442 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.156498 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.156520 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.156550 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.156574 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:46Z","lastTransitionTime":"2025-11-22T07:12:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.259167 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.259250 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.259266 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.259286 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.259300 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:46Z","lastTransitionTime":"2025-11-22T07:12:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.362514 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.362578 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.362598 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.362622 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.362640 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:46Z","lastTransitionTime":"2025-11-22T07:12:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.464576 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.464685 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.464706 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.464727 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.464741 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:46Z","lastTransitionTime":"2025-11-22T07:12:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.567273 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.567339 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.567365 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.567399 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.567426 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:46Z","lastTransitionTime":"2025-11-22T07:12:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.670590 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.670654 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.670672 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.670697 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.670714 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:46Z","lastTransitionTime":"2025-11-22T07:12:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.742022 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.742144 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.742244 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:46 crc kubenswrapper[4929]: E1122 07:12:46.742327 4929 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 07:12:46 crc kubenswrapper[4929]: E1122 07:12:46.742368 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:50.7423206 +0000 UTC m=+167.851774663 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:12:46 crc kubenswrapper[4929]: E1122 07:12:46.742420 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 07:13:50.742401932 +0000 UTC m=+167.851855995 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 07:12:46 crc kubenswrapper[4929]: E1122 07:12:46.742544 4929 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 07:12:46 crc kubenswrapper[4929]: E1122 07:12:46.742682 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 07:13:50.742646308 +0000 UTC m=+167.852100371 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.772872 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.772930 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.772947 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.772973 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.772992 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:46Z","lastTransitionTime":"2025-11-22T07:12:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.843164 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.843264 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:12:46 crc kubenswrapper[4929]: E1122 07:12:46.843421 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 22 07:12:46 crc kubenswrapper[4929]: E1122 07:12:46.843410 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 22 07:12:46 crc kubenswrapper[4929]: E1122 07:12:46.843491 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 22 07:12:46 crc kubenswrapper[4929]: E1122 07:12:46.843511 4929 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 07:12:46 crc kubenswrapper[4929]: E1122 07:12:46.843441 4929 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 22 07:12:46 crc kubenswrapper[4929]: E1122 07:12:46.843586 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 07:13:50.843561052 +0000 UTC m=+167.953015105 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 07:12:46 crc kubenswrapper[4929]: E1122 07:12:46.843593 4929 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 07:12:46 crc kubenswrapper[4929]: E1122 07:12:46.843664 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 07:13:50.843641934 +0000 UTC m=+167.953096057 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.874898 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.874942 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.874954 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.874970 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.874983 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:46Z","lastTransitionTime":"2025-11-22T07:12:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.947071 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.947203 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.947264 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:12:46 crc kubenswrapper[4929]: E1122 07:12:46.947406 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:12:46 crc kubenswrapper[4929]: E1122 07:12:46.947581 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:12:46 crc kubenswrapper[4929]: E1122 07:12:46.947743 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.977817 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.977874 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.977894 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.977920 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:46 crc kubenswrapper[4929]: I1122 07:12:46.977938 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:46Z","lastTransitionTime":"2025-11-22T07:12:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.081345 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.081407 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.081426 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.081451 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.081469 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:47Z","lastTransitionTime":"2025-11-22T07:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.184814 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.184872 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.184890 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.184917 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.184935 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:47Z","lastTransitionTime":"2025-11-22T07:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.287706 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.287746 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.287758 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.287774 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.287786 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:47Z","lastTransitionTime":"2025-11-22T07:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.390271 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.390318 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.390331 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.390349 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.390362 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:47Z","lastTransitionTime":"2025-11-22T07:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.493295 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.493337 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.493347 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.493364 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.493374 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:47Z","lastTransitionTime":"2025-11-22T07:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.506768 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vkn7m_77273e11-9bf5-4480-bf99-266ae3f1ed7a/ovnkube-controller/1.log"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.509615 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerStarted","Data":"c2767c12d8836dca56016e73dbd1efcd722cd10f818b93ebad28312457d57ae9"}
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.510195 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.525489 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:47Z is after 2025-08-24T17:21:41Z"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.540585 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:47Z is after 2025-08-24T17:21:41Z"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.553918 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:47Z is after 2025-08-24T17:21:41Z"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.570152 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2767c12d8836dca56016e73dbd1efcd722cd10f818b93ebad28312457d57ae9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ee33a987016f40b3dcd310ff551ca2cd43c6be08b14f149194ed36beb11c4aa\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T07:12:32Z\\\",\\\"message\\\":\\\"flector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979365 6637 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979421 6637 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979561 6637 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 07:12:31.979759 6637 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979846 6637 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979934 6637 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.980134 6637 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 07:12:31.980484 6637 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:47Z is after 2025-08-24T17:21:41Z"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.580095 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:47Z is after 2025-08-24T17:21:41Z"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.595905 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://267921a54f8c6cd58f1c3e278a2efeafdf5f6d62128c1d0ae9f82578a6674e03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:47Z is after 2025-08-24T17:21:41Z"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.597244 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.597271 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.597283 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.597299 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.597312 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:47Z","lastTransitionTime":"2025-11-22T07:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.618296 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\
\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:47Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.632974 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:47Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.649100 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:47Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.664271 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:47Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.677577 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:47Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.689886 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:47Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.699741 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.699779 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.699790 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.699805 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.699816 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:47Z","lastTransitionTime":"2025-11-22T07:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.701256 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:47Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.710820 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:47Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.720419 4929 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:47Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.729070 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03ff3be1-6ace-40d0-81aa-da787e652545\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:47Z is after 
2025-08-24T17:21:41Z" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.740356 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:47Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.753183 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:47Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.762521 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:47Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.802398 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.802491 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.802509 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.802525 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.802537 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:47Z","lastTransitionTime":"2025-11-22T07:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.905451 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.905493 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.905505 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.905525 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.905539 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:47Z","lastTransitionTime":"2025-11-22T07:12:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:47 crc kubenswrapper[4929]: I1122 07:12:47.946264 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:12:47 crc kubenswrapper[4929]: E1122 07:12:47.946423 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.029909 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.030005 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.030023 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.030060 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.030085 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:48Z","lastTransitionTime":"2025-11-22T07:12:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.133514 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.133547 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.133559 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.133575 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.133586 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:48Z","lastTransitionTime":"2025-11-22T07:12:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.235541 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.235617 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.235643 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.235671 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.235693 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:48Z","lastTransitionTime":"2025-11-22T07:12:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.338651 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.338695 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.338712 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.338732 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.338747 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:48Z","lastTransitionTime":"2025-11-22T07:12:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.441409 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.441467 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.441480 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.441497 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.441512 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:48Z","lastTransitionTime":"2025-11-22T07:12:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.544877 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.544931 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.544952 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.544981 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.545046 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:48Z","lastTransitionTime":"2025-11-22T07:12:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.647818 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.647875 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.647893 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.647920 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.647940 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:48Z","lastTransitionTime":"2025-11-22T07:12:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.751568 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.751642 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.751660 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.751686 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.751708 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:48Z","lastTransitionTime":"2025-11-22T07:12:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.855260 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.855330 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.855347 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.855371 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.855388 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:48Z","lastTransitionTime":"2025-11-22T07:12:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.946446 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.946567 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.946665 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:12:48 crc kubenswrapper[4929]: E1122 07:12:48.946584 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:12:48 crc kubenswrapper[4929]: E1122 07:12:48.946782 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:12:48 crc kubenswrapper[4929]: E1122 07:12:48.946857 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.958550 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.958612 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.958625 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.958648 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:48 crc kubenswrapper[4929]: I1122 07:12:48.958663 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:48Z","lastTransitionTime":"2025-11-22T07:12:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.061702 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.061760 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.061777 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.061800 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.061819 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:49Z","lastTransitionTime":"2025-11-22T07:12:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.165145 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.165203 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.165250 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.165274 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.165292 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:49Z","lastTransitionTime":"2025-11-22T07:12:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.267750 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.267811 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.267828 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.267851 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.267868 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:49Z","lastTransitionTime":"2025-11-22T07:12:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.370657 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.370723 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.370740 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.370765 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.370783 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:49Z","lastTransitionTime":"2025-11-22T07:12:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.473175 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.473419 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.473461 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.473490 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.473508 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:49Z","lastTransitionTime":"2025-11-22T07:12:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.520647 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vkn7m_77273e11-9bf5-4480-bf99-266ae3f1ed7a/ovnkube-controller/2.log" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.521592 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vkn7m_77273e11-9bf5-4480-bf99-266ae3f1ed7a/ovnkube-controller/1.log" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.525629 4929 generic.go:334] "Generic (PLEG): container finished" podID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerID="c2767c12d8836dca56016e73dbd1efcd722cd10f818b93ebad28312457d57ae9" exitCode=1 Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.525691 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerDied","Data":"c2767c12d8836dca56016e73dbd1efcd722cd10f818b93ebad28312457d57ae9"} Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.525746 4929 scope.go:117] "RemoveContainer" containerID="5ee33a987016f40b3dcd310ff551ca2cd43c6be08b14f149194ed36beb11c4aa" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.527070 4929 scope.go:117] "RemoveContainer" containerID="c2767c12d8836dca56016e73dbd1efcd722cd10f818b93ebad28312457d57ae9" Nov 22 07:12:49 crc kubenswrapper[4929]: E1122 07:12:49.527424 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-vkn7m_openshift-ovn-kubernetes(77273e11-9bf5-4480-bf99-266ae3f1ed7a)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.548020 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:49Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.567340 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:49Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.576783 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.576810 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.576819 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.576833 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.576842 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:49Z","lastTransitionTime":"2025-11-22T07:12:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.584314 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:49Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.600902 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:49Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.615150 4929 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03ff3be1-6ace-40d0-81aa-da787e652545\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:49Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.634042 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed 
to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:49Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.653914 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:49Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.669477 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:49Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.679753 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.679795 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.679806 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.679821 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.679834 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:49Z","lastTransitionTime":"2025-11-22T07:12:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.683800 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" 
for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:49Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.701824 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:49Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.712646 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:49Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.726189 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:49Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.745774 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2767c12d8836dca56016e73dbd1efcd722cd10f
818b93ebad28312457d57ae9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ee33a987016f40b3dcd310ff551ca2cd43c6be08b14f149194ed36beb11c4aa\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T07:12:32Z\\\",\\\"message\\\":\\\"flector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979365 6637 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979421 6637 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979561 6637 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 07:12:31.979759 6637 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979846 6637 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979934 6637 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.980134 6637 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 07:12:31.980484 6637 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2767c12d8836dca56016e73dbd1efcd722cd10f818b93ebad28312457d57ae9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T07:12:48Z\\\",\\\"message\\\":\\\"ent handler 2 for removal\\\\nI1122 07:12:47.670574 6841 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 07:12:47.670580 6841 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1122 07:12:47.670685 6841 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 07:12:47.670675 6841 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 07:12:47.670716 6841 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1122 07:12:47.670723 6841 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 07:12:47.670727 6841 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1122 07:12:47.670742 6841 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 07:12:47.670755 6841 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 07:12:47.670788 6841 factory.go:656] Stopping watch factory\\\\nI1122 07:12:47.670814 6841 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 07:12:47.670814 6841 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1122 07:12:47.670919 6841 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1122 07:12:47.670970 6841 ovnkube.go:599] Stopped 
ovnkube\\\\nI1122 07:12:47.671003 6841 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1122 07:12:47.671108 6841 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o:
//fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:49Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.761572 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://267921a54f8c6cd58f1c3e278a2efeafdf5f6d62128c1d0ae9f82578a6674e03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:49Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.781880 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.781920 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:49 crc 
kubenswrapper[4929]: I1122 07:12:49.781930 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.781943 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.781952 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:49Z","lastTransitionTime":"2025-11-22T07:12:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.792265 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mou
ntPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\
\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:49Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.807352 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:49Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.823432 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:49Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.843729 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:49Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.859798 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\
\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:49Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.884852 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.884898 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.884915 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.884938 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.884958 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:49Z","lastTransitionTime":"2025-11-22T07:12:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.946927 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:49 crc kubenswrapper[4929]: E1122 07:12:49.947107 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.987052 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.987131 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.987150 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.987186 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:49 crc kubenswrapper[4929]: I1122 07:12:49.987364 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:49Z","lastTransitionTime":"2025-11-22T07:12:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.091091 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.091125 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.091133 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.091171 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.091183 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:50Z","lastTransitionTime":"2025-11-22T07:12:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.193962 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.194055 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.194075 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.194099 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.194117 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:50Z","lastTransitionTime":"2025-11-22T07:12:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.296062 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.296094 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.296111 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.296128 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.296140 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:50Z","lastTransitionTime":"2025-11-22T07:12:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.398989 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.399037 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.399049 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.399064 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.399076 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:50Z","lastTransitionTime":"2025-11-22T07:12:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.502113 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.502169 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.502188 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.502242 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.502262 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:50Z","lastTransitionTime":"2025-11-22T07:12:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.530545 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vkn7m_77273e11-9bf5-4480-bf99-266ae3f1ed7a/ovnkube-controller/2.log" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.604904 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.604959 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.604978 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.605002 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.605020 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:50Z","lastTransitionTime":"2025-11-22T07:12:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.707113 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.707173 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.707190 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.707240 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.707267 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:50Z","lastTransitionTime":"2025-11-22T07:12:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.809647 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.809685 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.809694 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.809707 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.809715 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:50Z","lastTransitionTime":"2025-11-22T07:12:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.912355 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.912404 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.912415 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.912433 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.912448 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:50Z","lastTransitionTime":"2025-11-22T07:12:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.946838 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.946955 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:50 crc kubenswrapper[4929]: E1122 07:12:50.947169 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:12:50 crc kubenswrapper[4929]: I1122 07:12:50.947283 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:50 crc kubenswrapper[4929]: E1122 07:12:50.947452 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:50 crc kubenswrapper[4929]: E1122 07:12:50.947676 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.014875 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.014928 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.014946 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.014969 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.014986 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:51Z","lastTransitionTime":"2025-11-22T07:12:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.118590 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.118706 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.118727 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.118756 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.118773 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:51Z","lastTransitionTime":"2025-11-22T07:12:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.221099 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.221141 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.221149 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.221165 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.221174 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:51Z","lastTransitionTime":"2025-11-22T07:12:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.323952 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.323996 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.324006 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.324020 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.324029 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:51Z","lastTransitionTime":"2025-11-22T07:12:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.427546 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.427607 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.427626 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.427651 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.427668 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:51Z","lastTransitionTime":"2025-11-22T07:12:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.531738 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.531804 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.531821 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.531846 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.531863 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:51Z","lastTransitionTime":"2025-11-22T07:12:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.638456 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.638532 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.638544 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.638563 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.638577 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:51Z","lastTransitionTime":"2025-11-22T07:12:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.741341 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.741392 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.741404 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.741422 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.741434 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:51Z","lastTransitionTime":"2025-11-22T07:12:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.844031 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.844092 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.844102 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.844114 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.844158 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:51Z","lastTransitionTime":"2025-11-22T07:12:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.946542 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.946732 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.946823 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.946839 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.946575 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:51 crc kubenswrapper[4929]: I1122 07:12:51.946850 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:51Z","lastTransitionTime":"2025-11-22T07:12:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:51 crc kubenswrapper[4929]: E1122 07:12:51.946972 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.050655 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.050693 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.050701 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.050715 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.050724 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:52Z","lastTransitionTime":"2025-11-22T07:12:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.153203 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.153292 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.153311 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.153340 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.153364 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:52Z","lastTransitionTime":"2025-11-22T07:12:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.256356 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.256402 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.256415 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.256432 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.256444 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:52Z","lastTransitionTime":"2025-11-22T07:12:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.359681 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.359749 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.359766 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.359789 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.359809 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:52Z","lastTransitionTime":"2025-11-22T07:12:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.462995 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.463095 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.463111 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.463134 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.463153 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:52Z","lastTransitionTime":"2025-11-22T07:12:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.565924 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.565971 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.565983 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.566000 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.566011 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:52Z","lastTransitionTime":"2025-11-22T07:12:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.668867 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.668925 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.668941 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.668963 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.668978 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:52Z","lastTransitionTime":"2025-11-22T07:12:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.772100 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.772145 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.772157 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.772174 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.772186 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:52Z","lastTransitionTime":"2025-11-22T07:12:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.876013 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.876091 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.876109 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.876134 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.876155 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:52Z","lastTransitionTime":"2025-11-22T07:12:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.947398 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.947514 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:52 crc kubenswrapper[4929]: E1122 07:12:52.947630 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.947524 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:52 crc kubenswrapper[4929]: E1122 07:12:52.947767 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:12:52 crc kubenswrapper[4929]: E1122 07:12:52.947944 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.978932 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.979003 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.979019 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.979071 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:52 crc kubenswrapper[4929]: I1122 07:12:52.979086 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:52Z","lastTransitionTime":"2025-11-22T07:12:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.082377 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.082442 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.082454 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.082469 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.082482 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:53Z","lastTransitionTime":"2025-11-22T07:12:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.185149 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.185356 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.185391 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.185423 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.185446 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:53Z","lastTransitionTime":"2025-11-22T07:12:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.288037 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.288092 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.288109 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.288133 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.288152 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:53Z","lastTransitionTime":"2025-11-22T07:12:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.390718 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.390796 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.390818 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.390847 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.390870 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:53Z","lastTransitionTime":"2025-11-22T07:12:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.494848 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.495077 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.495166 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.495281 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.495314 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:53Z","lastTransitionTime":"2025-11-22T07:12:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.599742 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.599953 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.599981 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.600008 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.600029 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:53Z","lastTransitionTime":"2025-11-22T07:12:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.703232 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.703276 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.703288 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.703304 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.703317 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:53Z","lastTransitionTime":"2025-11-22T07:12:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.806104 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.806144 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.806156 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.806173 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.806187 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:53Z","lastTransitionTime":"2025-11-22T07:12:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.909158 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.909284 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.909316 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.909350 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.909377 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:53Z","lastTransitionTime":"2025-11-22T07:12:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.947328 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:53 crc kubenswrapper[4929]: E1122 07:12:53.947572 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.963760 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:53Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.975397 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:53Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:53 crc kubenswrapper[4929]: I1122 07:12:53.989135 4929 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:53Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.003168 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.011539 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.011775 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.011857 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.011941 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.012017 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:54Z","lastTransitionTime":"2025-11-22T07:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.017434 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.029086 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.039842 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.053401 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 
07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.065278 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03ff3be1-6ace-40d0-81aa-da787e652545\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.078580 4929 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\
\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.092025 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.108291 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2767c12d8836dca56016e73dbd1efcd722cd10f
818b93ebad28312457d57ae9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5ee33a987016f40b3dcd310ff551ca2cd43c6be08b14f149194ed36beb11c4aa\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T07:12:32Z\\\",\\\"message\\\":\\\"flector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979365 6637 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979421 6637 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979561 6637 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 07:12:31.979759 6637 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979846 6637 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.979934 6637 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 07:12:31.980134 6637 reflector.go:311] Stopping reflector *v1.EgressService (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 07:12:31.980484 6637 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2767c12d8836dca56016e73dbd1efcd722cd10f818b93ebad28312457d57ae9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T07:12:48Z\\\",\\\"message\\\":\\\"ent handler 2 for removal\\\\nI1122 07:12:47.670574 6841 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 07:12:47.670580 6841 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1122 07:12:47.670685 6841 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 07:12:47.670675 6841 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 07:12:47.670716 6841 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1122 07:12:47.670723 6841 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 07:12:47.670727 6841 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1122 07:12:47.670742 6841 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 07:12:47.670755 6841 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 07:12:47.670788 6841 factory.go:656] Stopping watch factory\\\\nI1122 07:12:47.670814 6841 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 07:12:47.670814 6841 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1122 07:12:47.670919 6841 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1122 07:12:47.670970 6841 ovnkube.go:599] Stopped 
ovnkube\\\\nI1122 07:12:47.671003 6841 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1122 07:12:47.671108 6841 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o:
//fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.113961 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.113996 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.114005 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.114019 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.114028 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:54Z","lastTransitionTime":"2025-11-22T07:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.120021 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.130277 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.142258 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.154526 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.165125 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\
\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.180832 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://267921a54f8c6cd58f1c3e278a2efeafdf5f6d62128c1d0ae9f82578a6674e03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.205230 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc
2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\
\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.215638 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.215672 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.215684 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.215700 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.215715 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:54Z","lastTransitionTime":"2025-11-22T07:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.317853 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.317917 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.317931 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.317947 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.317960 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:54Z","lastTransitionTime":"2025-11-22T07:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.410379 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.410429 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.410444 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.410461 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.410473 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:54Z","lastTransitionTime":"2025-11-22T07:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:54 crc kubenswrapper[4929]: E1122 07:12:54.429877 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.434955 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.435040 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.435067 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.435099 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.435127 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:54Z","lastTransitionTime":"2025-11-22T07:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:54 crc kubenswrapper[4929]: E1122 07:12:54.451288 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.455625 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.455684 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.455703 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.455786 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.455809 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:54Z","lastTransitionTime":"2025-11-22T07:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:54 crc kubenswrapper[4929]: E1122 07:12:54.472549 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.478395 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.478508 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.478526 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.478550 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.478569 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:54Z","lastTransitionTime":"2025-11-22T07:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:54 crc kubenswrapper[4929]: E1122 07:12:54.498518 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.504016 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.504134 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.504241 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.504279 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.504299 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:54Z","lastTransitionTime":"2025-11-22T07:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:54 crc kubenswrapper[4929]: E1122 07:12:54.524087 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:12:54Z is after 2025-08-24T17:21:41Z" Nov 22 07:12:54 crc kubenswrapper[4929]: E1122 07:12:54.524416 4929 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.526708 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.526769 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.526785 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.526807 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.526825 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:54Z","lastTransitionTime":"2025-11-22T07:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.629514 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.629587 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.629605 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.629631 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.629647 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:54Z","lastTransitionTime":"2025-11-22T07:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.733032 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.733092 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.733110 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.733134 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.733152 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:54Z","lastTransitionTime":"2025-11-22T07:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.837249 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.837309 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.837349 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.837388 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.837414 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:54Z","lastTransitionTime":"2025-11-22T07:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.940305 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.940366 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.940388 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.940408 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.940421 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:54Z","lastTransitionTime":"2025-11-22T07:12:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.946513 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.946564 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:12:54 crc kubenswrapper[4929]: I1122 07:12:54.946525 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:12:54 crc kubenswrapper[4929]: E1122 07:12:54.946676 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:12:54 crc kubenswrapper[4929]: E1122 07:12:54.946788 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:54 crc kubenswrapper[4929]: E1122 07:12:54.946943 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.043683 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.043754 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.043775 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.043801 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.043818 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:55Z","lastTransitionTime":"2025-11-22T07:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.147035 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.147106 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.147129 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.147161 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.147179 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:55Z","lastTransitionTime":"2025-11-22T07:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.250496 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.250553 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.250571 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.250599 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.250617 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:55Z","lastTransitionTime":"2025-11-22T07:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.352983 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.353028 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.353042 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.353061 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.353076 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:55Z","lastTransitionTime":"2025-11-22T07:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.456358 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.456468 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.456486 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.456504 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.456520 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:55Z","lastTransitionTime":"2025-11-22T07:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.558742 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.558785 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.558797 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.558812 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.558825 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:55Z","lastTransitionTime":"2025-11-22T07:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.661551 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.661615 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.661627 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.661645 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.661656 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:55Z","lastTransitionTime":"2025-11-22T07:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.763808 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.763877 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.763890 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.763910 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.763951 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:55Z","lastTransitionTime":"2025-11-22T07:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.867188 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.867280 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.867292 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.867308 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.867320 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:55Z","lastTransitionTime":"2025-11-22T07:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.947174 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:12:55 crc kubenswrapper[4929]: E1122 07:12:55.947418 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710"
Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.969612 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.969659 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.969673 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.969690 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:55 crc kubenswrapper[4929]: I1122 07:12:55.969705 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:55Z","lastTransitionTime":"2025-11-22T07:12:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.071548 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.071619 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.071643 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.071671 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.071696 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:56Z","lastTransitionTime":"2025-11-22T07:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.174838 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.174907 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.174930 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.174973 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.174997 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:56Z","lastTransitionTime":"2025-11-22T07:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.277402 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.277466 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.277488 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.277520 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.277542 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:56Z","lastTransitionTime":"2025-11-22T07:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.380135 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.380204 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.380262 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.380288 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.380308 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:56Z","lastTransitionTime":"2025-11-22T07:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.482787 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.482814 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.482822 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.482834 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.482843 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:56Z","lastTransitionTime":"2025-11-22T07:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.585060 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.585107 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.585121 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.585141 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.585153 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:56Z","lastTransitionTime":"2025-11-22T07:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.688728 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.688802 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.688815 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.688833 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.688846 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:56Z","lastTransitionTime":"2025-11-22T07:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.792006 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.792070 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.792089 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.792114 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.792131 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:56Z","lastTransitionTime":"2025-11-22T07:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.895457 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.895559 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.895573 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.895588 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.895599 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:56Z","lastTransitionTime":"2025-11-22T07:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.946426 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:12:56 crc kubenswrapper[4929]: E1122 07:12:56.946538 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.946426 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:12:56 crc kubenswrapper[4929]: E1122 07:12:56.946732 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.946854 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:12:56 crc kubenswrapper[4929]: E1122 07:12:56.946929 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.998350 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.998407 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.998453 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.998486 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:56 crc kubenswrapper[4929]: I1122 07:12:56.998507 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:56Z","lastTransitionTime":"2025-11-22T07:12:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.101451 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.101565 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.101593 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.101629 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.101653 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:57Z","lastTransitionTime":"2025-11-22T07:12:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.204948 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.205015 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.205034 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.205062 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.205080 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:57Z","lastTransitionTime":"2025-11-22T07:12:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.307166 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.307252 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.307268 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.307291 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.307309 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:57Z","lastTransitionTime":"2025-11-22T07:12:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.410719 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.410781 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.410806 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.410837 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.410861 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:57Z","lastTransitionTime":"2025-11-22T07:12:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.513630 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.513677 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.513690 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.513705 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.513715 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:57Z","lastTransitionTime":"2025-11-22T07:12:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.617087 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.617150 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.617169 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.617192 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.617255 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:57Z","lastTransitionTime":"2025-11-22T07:12:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.719904 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.719938 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.719946 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.719959 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.719969 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:57Z","lastTransitionTime":"2025-11-22T07:12:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.822131 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.822170 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.822178 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.822191 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.822199 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:57Z","lastTransitionTime":"2025-11-22T07:12:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.925196 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.925272 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.925283 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.925301 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.925332 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:57Z","lastTransitionTime":"2025-11-22T07:12:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:57 crc kubenswrapper[4929]: I1122 07:12:57.946959 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:12:57 crc kubenswrapper[4929]: E1122 07:12:57.947112 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.027951 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.028017 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.028041 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.028068 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.028087 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:58Z","lastTransitionTime":"2025-11-22T07:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.130806 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.130887 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.130915 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.130947 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.130970 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:58Z","lastTransitionTime":"2025-11-22T07:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.233785 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.233864 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.233890 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.233920 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.233941 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:58Z","lastTransitionTime":"2025-11-22T07:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.337544 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.337619 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.337643 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.337672 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.337697 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:58Z","lastTransitionTime":"2025-11-22T07:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.441069 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.441110 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.441119 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.441132 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.441141 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:58Z","lastTransitionTime":"2025-11-22T07:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.543742 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.543783 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.543793 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.543809 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.543821 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:58Z","lastTransitionTime":"2025-11-22T07:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.646143 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.646180 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.646203 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.646235 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.646244 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:58Z","lastTransitionTime":"2025-11-22T07:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.749417 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.749512 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.749551 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.749583 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.749609 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:58Z","lastTransitionTime":"2025-11-22T07:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.852524 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.852569 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.852580 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.852599 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.852610 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:58Z","lastTransitionTime":"2025-11-22T07:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.947095 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.947103 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.947498 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:12:58 crc kubenswrapper[4929]: E1122 07:12:58.947587 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:12:58 crc kubenswrapper[4929]: E1122 07:12:58.947831 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:12:58 crc kubenswrapper[4929]: E1122 07:12:58.947915 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.954996 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.955045 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.955063 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.955084 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:58 crc kubenswrapper[4929]: I1122 07:12:58.955098 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:58Z","lastTransitionTime":"2025-11-22T07:12:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.058696 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.058755 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.058773 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.058799 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.058817 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:59Z","lastTransitionTime":"2025-11-22T07:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.161456 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.161530 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.161558 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.161589 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.161607 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:59Z","lastTransitionTime":"2025-11-22T07:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.264912 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.264960 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.264971 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.264988 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.265002 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:59Z","lastTransitionTime":"2025-11-22T07:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.367941 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.368016 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.368028 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.368047 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.368058 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:59Z","lastTransitionTime":"2025-11-22T07:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.470851 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.470911 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.470927 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.470951 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.470965 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:59Z","lastTransitionTime":"2025-11-22T07:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.573806 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.573871 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.573891 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.573917 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.573936 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:59Z","lastTransitionTime":"2025-11-22T07:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.676186 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.676278 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.676295 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.676321 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.676338 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:59Z","lastTransitionTime":"2025-11-22T07:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.779321 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.779403 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.779422 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.779447 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.779464 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:59Z","lastTransitionTime":"2025-11-22T07:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.882258 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.882717 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.882735 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.882759 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.882775 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:59Z","lastTransitionTime":"2025-11-22T07:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.946359 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:12:59 crc kubenswrapper[4929]: E1122 07:12:59.946543 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.989037 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.989109 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.989136 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.989245 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:12:59 crc kubenswrapper[4929]: I1122 07:12:59.989274 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:12:59Z","lastTransitionTime":"2025-11-22T07:12:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.091980 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.092034 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.092052 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.092075 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.092091 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:00Z","lastTransitionTime":"2025-11-22T07:13:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.195888 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.195962 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.195994 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.196026 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.196046 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:00Z","lastTransitionTime":"2025-11-22T07:13:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.299400 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.299501 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.299517 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.299540 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.299556 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:00Z","lastTransitionTime":"2025-11-22T07:13:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.402369 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.402443 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.402457 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.402482 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.402498 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:00Z","lastTransitionTime":"2025-11-22T07:13:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.505942 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.506010 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.506026 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.506048 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.506066 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:00Z","lastTransitionTime":"2025-11-22T07:13:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.608903 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.608956 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.608973 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.608995 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.609012 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:00Z","lastTransitionTime":"2025-11-22T07:13:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.711423 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.711490 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.711514 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.711541 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.711562 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:00Z","lastTransitionTime":"2025-11-22T07:13:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.814396 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.814457 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.814481 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.814510 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.814530 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:00Z","lastTransitionTime":"2025-11-22T07:13:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.916665 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.916734 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.916752 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.916775 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.916793 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:00Z","lastTransitionTime":"2025-11-22T07:13:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.947078 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.947192 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:13:00 crc kubenswrapper[4929]: I1122 07:13:00.947203 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:13:00 crc kubenswrapper[4929]: E1122 07:13:00.947287 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:13:00 crc kubenswrapper[4929]: E1122 07:13:00.947359 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:13:00 crc kubenswrapper[4929]: E1122 07:13:00.947457 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.019676 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.019796 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.019835 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.019864 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.019884 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:01Z","lastTransitionTime":"2025-11-22T07:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.123353 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.123405 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.123414 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.123435 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.123446 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:01Z","lastTransitionTime":"2025-11-22T07:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.226744 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.226820 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.226838 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.226858 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.226873 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:01Z","lastTransitionTime":"2025-11-22T07:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.331278 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.331344 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.331367 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.331400 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.331426 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:01Z","lastTransitionTime":"2025-11-22T07:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.434559 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.434616 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.434629 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.434646 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.434658 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:01Z","lastTransitionTime":"2025-11-22T07:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.538437 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.538530 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.538555 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.538589 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.538614 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:01Z","lastTransitionTime":"2025-11-22T07:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.641873 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.641935 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.641953 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.641976 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.641993 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:01Z","lastTransitionTime":"2025-11-22T07:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.745466 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.745512 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.745527 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.745548 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.745563 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:01Z","lastTransitionTime":"2025-11-22T07:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.849516 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.849580 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.849604 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.849633 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.849656 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:01Z","lastTransitionTime":"2025-11-22T07:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.947279 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:13:01 crc kubenswrapper[4929]: E1122 07:13:01.947650 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.948627 4929 scope.go:117] "RemoveContainer" containerID="c2767c12d8836dca56016e73dbd1efcd722cd10f818b93ebad28312457d57ae9" Nov 22 07:13:01 crc kubenswrapper[4929]: E1122 07:13:01.948890 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-vkn7m_openshift-ovn-kubernetes(77273e11-9bf5-4480-bf99-266ae3f1ed7a)\"" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.953037 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.953084 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.953105 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.953127 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.953142 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:01Z","lastTransitionTime":"2025-11-22T07:13:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.973673 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:01 crc kubenswrapper[4929]: I1122 07:13:01.993719 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:01Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.016663 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.036316 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2767c12d8836dca56016e73dbd1efcd722cd10f
818b93ebad28312457d57ae9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2767c12d8836dca56016e73dbd1efcd722cd10f818b93ebad28312457d57ae9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T07:12:48Z\\\",\\\"message\\\":\\\"ent handler 2 for removal\\\\nI1122 07:12:47.670574 6841 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 07:12:47.670580 6841 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1122 07:12:47.670685 6841 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 07:12:47.670675 6841 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 07:12:47.670716 6841 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1122 07:12:47.670723 6841 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 07:12:47.670727 6841 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1122 07:12:47.670742 6841 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 07:12:47.670755 6841 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 07:12:47.670788 6841 factory.go:656] Stopping watch factory\\\\nI1122 07:12:47.670814 6841 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 07:12:47.670814 6841 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1122 07:12:47.670919 6841 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1122 07:12:47.670970 6841 ovnkube.go:599] Stopped ovnkube\\\\nI1122 07:12:47.671003 6841 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1122 07:12:47.671108 6841 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:46Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vkn7m_openshift-ovn-kubernetes(77273e11-9bf5-4480-bf99-266ae3f1ed7a)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z"
Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.055453 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.055546 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.055565 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.055591 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.055609 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:02Z","lastTransitionTime":"2025-11-22T07:13:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.063335 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://267921a54f8c6cd58f1c3e278a2efeafdf5f6d62128c1d0ae9f82578a6674e03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.090072 4929 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"container
ID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbf
a44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.104170 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.118499 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.135050 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.148332 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.157512 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.157558 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.157570 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.157585 4929 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeNotReady" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.157596 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:02Z","lastTransitionTime":"2025-11-22T07:13:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.169691 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"im
ageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.188383 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container 
could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.202812 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.215363 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.228921 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"03ff3be1-6ace-40d0-81aa-da787e652545\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.245005 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.260203 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.260280 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.260172 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.260294 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.260511 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.260545 4929 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:02Z","lastTransitionTime":"2025-11-22T07:13:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.274544 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.287828 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 
07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.363267 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.363348 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.363372 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.363404 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.363431 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:02Z","lastTransitionTime":"2025-11-22T07:13:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.465944 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.465979 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.465990 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.466005 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.466016 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:02Z","lastTransitionTime":"2025-11-22T07:13:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.569325 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.569535 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.569580 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.569627 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.569654 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:02Z","lastTransitionTime":"2025-11-22T07:13:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.579906 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bb6rk_763619b4-b584-4089-bd56-96823e22e25e/kube-multus/0.log" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.579984 4929 generic.go:334] "Generic (PLEG): container finished" podID="763619b4-b584-4089-bd56-96823e22e25e" containerID="d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8" exitCode=1 Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.580034 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bb6rk" event={"ID":"763619b4-b584-4089-bd56-96823e22e25e","Type":"ContainerDied","Data":"d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8"} Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.580749 4929 scope.go:117] "RemoveContainer" containerID="d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.599794 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.619187 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.634399 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.646781 4929 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.660753 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"03ff3be1-6ace-40d0-81aa-da787e652545\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.672588 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.672634 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.672648 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.672666 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.672678 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:02Z","lastTransitionTime":"2025-11-22T07:13:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.676726 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.695999 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.709507 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.721264 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 
07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.735575 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.747292 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.759566 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.775021 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.775060 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.775073 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.775091 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.775105 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:02Z","lastTransitionTime":"2025-11-22T07:13:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.777812 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2767c12d8836dca56016e73dbd1efcd722cd10f818b93ebad28312457d57ae9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2767c12d8836dca56016e73dbd1efcd722cd10f818b93ebad28312457d57ae9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T07:12:48Z\\\",\\\"message\\\":\\\"ent handler 2 for removal\\\\nI1122 07:12:47.670574 6841 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 07:12:47.670580 6841 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1122 07:12:47.670685 6841 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 07:12:47.670675 6841 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 07:12:47.670716 6841 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1122 07:12:47.670723 6841 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 07:12:47.670727 6841 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1122 07:12:47.670742 6841 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 07:12:47.670755 6841 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 07:12:47.670788 6841 factory.go:656] Stopping watch factory\\\\nI1122 07:12:47.670814 6841 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 07:12:47.670814 6841 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1122 07:12:47.670919 6841 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1122 07:12:47.670970 6841 ovnkube.go:599] Stopped ovnkube\\\\nI1122 07:12:47.671003 6841 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1122 07:12:47.671108 6841 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:46Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vkn7m_openshift-ovn-kubernetes(77273e11-9bf5-4480-bf99-266ae3f1ed7a)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.798496 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec
1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.809160 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.822934 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.837493 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:02Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T07:13:01Z\\\",\\\"message\\\":\\\"2025-11-22T07:12:12+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_f6621a2a-870a-418e-88e8-3800eb5845ff\\\\n2025-11-22T07:12:12+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_f6621a2a-870a-418e-88e8-3800eb5845ff to /host/opt/cni/bin/\\\\n2025-11-22T07:12:14Z [verbose] multus-daemon started\\\\n2025-11-22T07:12:14Z [verbose] Readiness Indicator file check\\\\n2025-11-22T07:12:59Z [error] have you checked that your 
default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.848964 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.862996 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://267921a54f8c6cd58f1c3e278a2efeafdf5f6d62128c1d0ae9f82578a6674e03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:02Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.877636 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.877691 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:02 crc 
kubenswrapper[4929]: I1122 07:13:02.877707 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.877731 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.877749 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:02Z","lastTransitionTime":"2025-11-22T07:13:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.946695 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.946751 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:13:02 crc kubenswrapper[4929]: E1122 07:13:02.946847 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 07:13:02 crc kubenswrapper[4929]: E1122 07:13:02.946997 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.947068 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:13:02 crc kubenswrapper[4929]: E1122 07:13:02.947661 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.981323 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.981384 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.981409 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.981438 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:02 crc kubenswrapper[4929]: I1122 07:13:02.981460 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:02Z","lastTransitionTime":"2025-11-22T07:13:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.084939 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.085005 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.085025 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.085509 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.085549 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:03Z","lastTransitionTime":"2025-11-22T07:13:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.188387 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.188466 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.188494 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.188529 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.188552 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:03Z","lastTransitionTime":"2025-11-22T07:13:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.292009 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.292053 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.292062 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.292075 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.292084 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:03Z","lastTransitionTime":"2025-11-22T07:13:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.394303 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.394359 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.394368 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.394380 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.394389 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:03Z","lastTransitionTime":"2025-11-22T07:13:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.498728 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.498777 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.498796 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.498819 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.498837 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:03Z","lastTransitionTime":"2025-11-22T07:13:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.586631 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bb6rk_763619b4-b584-4089-bd56-96823e22e25e/kube-multus/0.log" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.586706 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bb6rk" event={"ID":"763619b4-b584-4089-bd56-96823e22e25e","Type":"ContainerStarted","Data":"a4024d76014e52596406e6b6941d0087be76f8bb3a2b7f771c22db099c914ff1"} Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.601249 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.601299 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.601319 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.601344 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.601362 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:03Z","lastTransitionTime":"2025-11-22T07:13:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.607854 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03ff3be1-6ace-40d0-81aa-da787e652545\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.624917 4929 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.646546 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.660870 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.678304 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 
07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.702723 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.704098 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.704140 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.704149 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.704169 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.704185 4929 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:03Z","lastTransitionTime":"2025-11-22T07:13:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.732418 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.751939 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.777454 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2767c12d8836dca56016e73dbd1efcd722cd10f
818b93ebad28312457d57ae9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2767c12d8836dca56016e73dbd1efcd722cd10f818b93ebad28312457d57ae9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T07:12:48Z\\\",\\\"message\\\":\\\"ent handler 2 for removal\\\\nI1122 07:12:47.670574 6841 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 07:12:47.670580 6841 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1122 07:12:47.670685 6841 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 07:12:47.670675 6841 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 07:12:47.670716 6841 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1122 07:12:47.670723 6841 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 07:12:47.670727 6841 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1122 07:12:47.670742 6841 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 07:12:47.670755 6841 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 07:12:47.670788 6841 factory.go:656] Stopping watch factory\\\\nI1122 07:12:47.670814 6841 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 07:12:47.670814 6841 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1122 07:12:47.670919 6841 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1122 07:12:47.670970 6841 ovnkube.go:599] Stopped ovnkube\\\\nI1122 07:12:47.671003 6841 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1122 07:12:47.671108 6841 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:46Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vkn7m_openshift-ovn-kubernetes(77273e11-9bf5-4480-bf99-266ae3f1ed7a)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.798122 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://267921a54f8c6cd58f1c3e278a2efeafdf5f6d62128c1d0ae9f82578a6674e03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.806906 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.806950 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.806961 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.806979 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.806991 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:03Z","lastTransitionTime":"2025-11-22T07:13:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.827837 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.839641 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.853837 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.867417 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4024d76014e52596406e6b6941d0087be76f8bb3a2b7f771c22db099c914ff1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T07:13:01Z\\\",\\\"message\\\":\\\"2025-11-22T07:12:12+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_f6621a2a-870a-418e-88e8-3800eb5845ff\\\\n2025-11-22T07:12:12+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_f6621a2a-870a-418e-88e8-3800eb5845ff to /host/opt/cni/bin/\\\\n2025-11-22T07:12:14Z [verbose] multus-daemon started\\\\n2025-11-22T07:12:14Z [verbose] Readiness Indicator file check\\\\n2025-11-22T07:12:59Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:13:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.876921 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.888545 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.902039 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: E1122 07:13:03.911083 4929 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.915408 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae
34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.926891 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.949875 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:13:03 crc kubenswrapper[4929]: E1122 07:13:03.949999 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.961017 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-pvjvh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f24bc88f-796b-4ca1-a2a0-fb7cc3f3c416\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7834768c5f99be870c46b9b88d9000384e0af2a9bbc742ea6a9b71e984f0d153\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dn9pl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:50Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-pvjvh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.974968 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"779ec211-1252-46f1-80bc-2373dc58be4a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5c64622c0c6fb8bfcc0f8a99287e133c9c807d0ccce3cf61588461881ec032f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c61f3dec48bbab7b91b709d4818e0a1a2456c4ce5da874c1be0dd704b255f386\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2xn9b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g9vj5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 
07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.985856 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03ff3be1-6ace-40d0-81aa-da787e652545\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cd240d4f5654cecdf3c7b62253d89025cad415f5b92ca3783699eac515dea7bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://166ded284f612ae2a7f1b2d0fc8ddb2058d166d5bddd20c1a89bf183f74bbaec\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:03 crc kubenswrapper[4929]: I1122 07:13:03.999878 4929 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a430d9370f3fa4db978926039e6b5ec4c5c0a47bc1308efbba267232934bc42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:03Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.013724 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:58Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7d76eae70ff894a01259a022027096301199b4a167398c3d93c1c77b152cd635\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9da972257685e7701bfb6dea0fcd40f777b0ae56307337d9156447e666151447\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.034152 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"77273e11-9bf5-4480-bf99-266ae3f1ed7a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2767c12d8836dca56016e73dbd1efcd722cd10f818b93ebad28312457d57ae9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2767c12d8836dca56016e73dbd1efcd722cd10f818b93ebad28312457d57ae9\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T07:12:48Z\\\",\\\"message\\\":\\\"ent handler 2 for removal\\\\nI1122 07:12:47.670574 6841 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 07:12:47.670580 6841 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1122 07:12:47.670685 6841 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 07:12:47.670675 6841 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 07:12:47.670716 6841 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1122 07:12:47.670723 6841 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 07:12:47.670727 6841 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1122 07:12:47.670742 6841 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 07:12:47.670755 6841 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 07:12:47.670788 6841 factory.go:656] Stopping watch factory\\\\nI1122 07:12:47.670814 6841 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 07:12:47.670814 6841 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1122 07:12:47.670919 6841 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1122 07:12:47.670970 6841 ovnkube.go:599] Stopped ovnkube\\\\nI1122 07:12:47.671003 6841 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1122 07:12:47.671108 6841 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:46Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-vkn7m_openshift-ovn-kubernetes(77273e11-9bf5-4480-bf99-266ae3f1ed7a)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-j4rhl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-vkn7m\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.048566 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c3debaa4-63c4-4b55-9aba-8ac4b82ff8d8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://56429b25ce90953cf1a102339a61f8b8bbe3ed682a9411532d03e476e66b2ef2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6cad4769ee15f7a18fc6538189d719e683480f5286b26081378fd3d0593bc7df\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://877ad69e59e897c0d55dd9d8ada07f4033cc22d551b901bd631c444f9e7272f5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebc965e949ed96a314c3c5039b8085a7e262ef3f3b293741319ed9382fb82bbc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d66dc3daa8d15ecef50c42e23e636bc0473cbe79273a599579386f4e04fcc3f8\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T07:11:43Z\\\",\\\"message\\\":\\\"ing back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1122 07:11:32.152789 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 07:11:32.153755 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2920995946/tls.crt::/tmp/serving-cert-2920995946/tls.key\\\\\\\"\\\\nI1122 07:11:41.050986 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 07:11:42.220527 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 07:11:42.220553 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 07:11:42.220574 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 07:11:42.220581 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 07:11:42.228192 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1122 07:11:42.228242 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 07:11:42.228255 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228265 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 07:11:42.228272 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1122 07:11:42.228277 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 07:11:42.228282 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 07:11:42.228288 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 07:11:42.234616 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fa20916865bbcdf8034f9bdb54749ae910ecdabe67e1383bd488f405ddbcba0a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ef68224692adbc95f1ff308ac16d675677473610b5646f0238bb24fc39892d57\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.063134 4929 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2718c0e5-0a79-45a1-9bd0-0a1b7b17b930\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c84195c64b9de1139b7ca14d4f9ce38b7962d9a73510c30b967750181a824b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://94e3c3b2fc329767b77c781fdf473f11afcc4542d59284ad9f6a76eb477c5376\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9faee75f3b6c7f2a98a19fd9cb3256ec46b7751b9b39666c840a5168ca5923\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\
\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59f37527cfaddaee5127581e5bf045b381a4fbce22c0ad781889518fa4cdd165\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.077865 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.091508 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-bb6rk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"763619b4-b584-4089-bd56-96823e22e25e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4024d76014e52596406e6b6941d0087be76f8bb3a2b7f771c22db099c914ff1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T07:13:01Z\\\",\\\"message\\\":\\\"2025-11-22T07:12:12+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_f6621a2a-870a-418e-88e8-3800eb5845ff\\\\n2025-11-22T07:12:12+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_f6621a2a-870a-418e-88e8-3800eb5845ff to /host/opt/cni/bin/\\\\n2025-11-22T07:12:14Z [verbose] multus-daemon started\\\\n2025-11-22T07:12:14Z [verbose] Readiness Indicator file check\\\\n2025-11-22T07:12:59Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:13:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zwc8g\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-multus\"/\"multus-bb6rk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.103173 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-vzgwx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73f3e50e-3229-46e8-969b-e023922fdbce\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b841fbdc0711a04f991f2276f198090f8419a7c0542f6649b2cdea3cadd60a7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p26mb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:47Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-vzgwx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.116673 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"78d8d828-48aa-4499-a4af-54e0dd754349\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://267921a54f8c6cd58f1c3e278a2efeafdf5f6d62128c1d0ae9f82578a6674e03\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://212d788180ceac1f113eaee8804df4bff7ef692952c93a2db454e851c0878d44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ed014c5c312e86f93faaf7cca152c32e7f81899e19d547592ec7fecfbe25f75\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9caebd6822b60d3fb3842717816cf8288082e37452b40365b65c0c29e7eeb45f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://edfcc62fe7a7f2e11ca490e335ad78661af6acd34ba4725670b1675df11190e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e045380cd5b76262740a6210d274614b7377550525bf730c0112e0cc57c95522\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:17Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f105e3b09c0e926e340902e902f72e4d5936a9d381f89e3f06eb8e668e479bda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:12:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:12:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-clts9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-mtn8z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.134265 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"854261d6-ce00-412f-9470-239cb43b2dbd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://325c90854f3ee69278716ed048bee28980d42924ce4f37c1cd6d9ee927da3aeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a1cddbe403ecf4f46fef6ec1236eab6f80711aa586b7c26d35fb7866836376d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b89a6005286504c91b5e3cf6b30e979ff22355a674906ab2058cd47bd5b987b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://33220e1aa35dd41307005f18ba23375d01540d0
244223bf15ccaa2990b22b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1ef58996805f5f11938e5cc92cb4b752cd774f0a1d2c963a6beaa2ae97d47657\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8a08b060390c56d545b0d7de6160304c78479151166d67740d3575697dfa2551\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea51e1480c6ec511ce248a71e10762cab5b3b44717383fe2d67d9cbad82c0756\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://81bdd29dbfa44d1a5655ce2372793108f0882c6251e552895503d8d20c9a3f26\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T07:11:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T07:11:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.145463 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:00Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e959b0da39934e8353bc020bf0a4c35cb8b041e9ecf14f8488cdeb77ddffa8b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.157176 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.168970 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"42cb9248-6b5b-4970-8232-68883ec65710\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:02Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r2mjg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:12:02Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-vmdgb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.180283 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"00c7aa53-2023-4402-a6a3-49f6894f5d2a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://25deec99f6429c70f265dad1ed336d76a6a489ab06974c84fd966078e161f95b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5eadae9b0d6e2e6f03da46676ceb8b0189674c416d26c10f678cf40b899039bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e7cd4f417a436f279a418d98db32f6cdcb2d82e9708fb7e4040665cba0280a7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6b1fdf609a9bc3386310015bb46bf95ca7d2960a0a9a171fe01409200a4a4ebd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:11:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:04Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.196701 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.209713 4929 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"470531cb-120c-48d9-80e1-adf074cf3055\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:11:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T07:12:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://266a69f11c4807b543540e1a2651bd48215ce3a58e5742d198c814461abd304b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T07:12:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97j5q\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T07:11:48Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dssfx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: E1122 07:13:04.233082 4929 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.763911 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.764527 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.764639 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.764740 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.764821 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:04Z","lastTransitionTime":"2025-11-22T07:13:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 07:13:04 crc kubenswrapper[4929]: E1122 07:13:04.779468 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.788488 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.788562 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.788577 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.788596 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.789015 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:04Z","lastTransitionTime":"2025-11-22T07:13:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:04 crc kubenswrapper[4929]: E1122 07:13:04.809825 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.814654 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.814720 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.814733 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.814752 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.814765 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:04Z","lastTransitionTime":"2025-11-22T07:13:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:04 crc kubenswrapper[4929]: E1122 07:13:04.827955 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.832114 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.832180 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.832192 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.832236 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.832250 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:04Z","lastTransitionTime":"2025-11-22T07:13:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 07:13:04 crc kubenswrapper[4929]: E1122 07:13:04.846944 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z"
Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.851107 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.851415 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.851557 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.851663 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.851758 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:04Z","lastTransitionTime":"2025-11-22T07:13:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:13:04 crc kubenswrapper[4929]: E1122 07:13:04.866851 4929 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T07:13:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"35e6b744-d50b-4680-8a02-13229aa01a6f\\\",\\\"systemUUID\\\":\\\"ef0002b7-0f16-47be-ac5a-3e7125d8469f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T07:13:04Z is after 2025-08-24T17:21:41Z" Nov 22 07:13:04 crc kubenswrapper[4929]: E1122 07:13:04.867029 4929 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.947441 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.947476 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:13:04 crc kubenswrapper[4929]: I1122 07:13:04.947518 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:13:04 crc kubenswrapper[4929]: E1122 07:13:04.948154 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:13:04 crc kubenswrapper[4929]: E1122 07:13:04.948347 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:13:04 crc kubenswrapper[4929]: E1122 07:13:04.948005 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:13:05 crc kubenswrapper[4929]: I1122 07:13:05.947355 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:13:05 crc kubenswrapper[4929]: E1122 07:13:05.947723 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710"
Nov 22 07:13:06 crc kubenswrapper[4929]: I1122 07:13:06.166996 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs\") pod \"network-metrics-daemon-vmdgb\" (UID: \"42cb9248-6b5b-4970-8232-68883ec65710\") " pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:13:06 crc kubenswrapper[4929]: E1122 07:13:06.167265 4929 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 22 07:13:06 crc kubenswrapper[4929]: E1122 07:13:06.167393 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs podName:42cb9248-6b5b-4970-8232-68883ec65710 nodeName:}" failed. No retries permitted until 2025-11-22 07:14:10.167356418 +0000 UTC m=+187.276810481 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs") pod "network-metrics-daemon-vmdgb" (UID: "42cb9248-6b5b-4970-8232-68883ec65710") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 22 07:13:06 crc kubenswrapper[4929]: I1122 07:13:06.947413 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:13:06 crc kubenswrapper[4929]: I1122 07:13:06.947554 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:13:06 crc kubenswrapper[4929]: I1122 07:13:06.947688 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:13:06 crc kubenswrapper[4929]: E1122 07:13:06.947943 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:13:06 crc kubenswrapper[4929]: E1122 07:13:06.948078 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:13:06 crc kubenswrapper[4929]: E1122 07:13:06.948527 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:13:07 crc kubenswrapper[4929]: I1122 07:13:07.947111 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:13:07 crc kubenswrapper[4929]: E1122 07:13:07.947351 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710"
Nov 22 07:13:08 crc kubenswrapper[4929]: I1122 07:13:08.946372 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:13:08 crc kubenswrapper[4929]: I1122 07:13:08.946389 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:13:08 crc kubenswrapper[4929]: I1122 07:13:08.946539 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:13:08 crc kubenswrapper[4929]: E1122 07:13:08.946649 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:13:08 crc kubenswrapper[4929]: E1122 07:13:08.946697 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:13:08 crc kubenswrapper[4929]: E1122 07:13:08.946813 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:13:09 crc kubenswrapper[4929]: E1122 07:13:09.234685 4929 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 22 07:13:09 crc kubenswrapper[4929]: I1122 07:13:09.946657 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:13:09 crc kubenswrapper[4929]: E1122 07:13:09.946867 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710"
Nov 22 07:13:10 crc kubenswrapper[4929]: I1122 07:13:10.946300 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:13:10 crc kubenswrapper[4929]: I1122 07:13:10.946332 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:13:10 crc kubenswrapper[4929]: I1122 07:13:10.946310 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:13:10 crc kubenswrapper[4929]: E1122 07:13:10.946499 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:13:10 crc kubenswrapper[4929]: E1122 07:13:10.946634 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:13:10 crc kubenswrapper[4929]: E1122 07:13:10.946824 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:13:11 crc kubenswrapper[4929]: I1122 07:13:11.947382 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:13:11 crc kubenswrapper[4929]: E1122 07:13:11.947597 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710"
Nov 22 07:13:12 crc kubenswrapper[4929]: I1122 07:13:12.947045 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:13:12 crc kubenswrapper[4929]: I1122 07:13:12.947135 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:13:12 crc kubenswrapper[4929]: I1122 07:13:12.947058 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:13:12 crc kubenswrapper[4929]: E1122 07:13:12.947293 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:13:12 crc kubenswrapper[4929]: E1122 07:13:12.947482 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:13:12 crc kubenswrapper[4929]: E1122 07:13:12.947571 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:13:13 crc kubenswrapper[4929]: I1122 07:13:13.946869 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:13:13 crc kubenswrapper[4929]: E1122 07:13:13.947115 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710"
Nov 22 07:13:13 crc kubenswrapper[4929]: I1122 07:13:13.993304 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g9vj5" podStartSLOduration=86.993279075 podStartE2EDuration="1m26.993279075s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:13.978723341 +0000 UTC m=+131.088177374" watchObservedRunningTime="2025-11-22 07:13:13.993279075 +0000 UTC m=+131.102733108"
Nov 22 07:13:14 crc kubenswrapper[4929]: I1122 07:13:14.011619 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=53.01160187 podStartE2EDuration="53.01160187s" podCreationTimestamp="2025-11-22 07:12:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:13.993540201 +0000 UTC m=+131.102994244" watchObservedRunningTime="2025-11-22 07:13:14.01160187 +0000 UTC m=+131.121055883"
Nov 22 07:13:14 crc kubenswrapper[4929]: I1122 07:13:14.055538 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=90.055520108 podStartE2EDuration="1m30.055520108s" podCreationTimestamp="2025-11-22 07:11:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:14.055370295 +0000 UTC m=+131.164824308" watchObservedRunningTime="2025-11-22 07:13:14.055520108 +0000 UTC m=+131.164974121"
Nov 22 07:13:14 crc kubenswrapper[4929]: I1122 07:13:14.055959 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-pvjvh" podStartSLOduration=87.055954019 podStartE2EDuration="1m27.055954019s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:14.039039927 +0000 UTC m=+131.148493950" watchObservedRunningTime="2025-11-22 07:13:14.055954019 +0000 UTC m=+131.165408032"
Nov 22 07:13:14 crc kubenswrapper[4929]: I1122 07:13:14.067167 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=73.067148061 podStartE2EDuration="1m13.067148061s" podCreationTimestamp="2025-11-22 07:12:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:14.067083339 +0000 UTC m=+131.176537352" watchObservedRunningTime="2025-11-22 07:13:14.067148061 +0000 UTC m=+131.176602074"
Nov 22 07:13:14 crc kubenswrapper[4929]: I1122 07:13:14.162528 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-vzgwx" podStartSLOduration=87.162511649 podStartE2EDuration="1m27.162511649s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:14.142566595 +0000 UTC m=+131.252020608" watchObservedRunningTime="2025-11-22 07:13:14.162511649 +0000 UTC m=+131.271965662"
Nov 22 07:13:14 crc kubenswrapper[4929]: I1122 07:13:14.163023 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-mtn8z" podStartSLOduration=87.163016172 podStartE2EDuration="1m27.163016172s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:14.162629862 +0000 UTC m=+131.272083875" watchObservedRunningTime="2025-11-22 07:13:14.163016172 +0000 UTC m=+131.272470185"
Nov 22 07:13:14 crc kubenswrapper[4929]: I1122 07:13:14.197376 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=92.197357147 podStartE2EDuration="1m32.197357147s" podCreationTimestamp="2025-11-22 07:11:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:14.182871204 +0000 UTC m=+131.292325227" watchObservedRunningTime="2025-11-22 07:13:14.197357147 +0000 UTC m=+131.306811160"
Nov 22 07:13:14 crc kubenswrapper[4929]: I1122 07:13:14.223241 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-bb6rk" podStartSLOduration=87.223201555 podStartE2EDuration="1m27.223201555s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:14.222721773 +0000 UTC m=+131.332175786" watchObservedRunningTime="2025-11-22 07:13:14.223201555 +0000 UTC m=+131.332655568"
Nov 22 07:13:14 crc kubenswrapper[4929]: E1122 07:13:14.235667 4929 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 22 07:13:14 crc kubenswrapper[4929]: I1122 07:13:14.248923 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=92.24890435 podStartE2EDuration="1m32.24890435s" podCreationTimestamp="2025-11-22 07:11:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:14.236849957 +0000 UTC m=+131.346303970" watchObservedRunningTime="2025-11-22 07:13:14.24890435 +0000 UTC m=+131.358358363"
Nov 22 07:13:14 crc kubenswrapper[4929]: I1122 07:13:14.258912 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podStartSLOduration=87.258891303 podStartE2EDuration="1m27.258891303s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:14.258272808 +0000 UTC m=+131.367726841" watchObservedRunningTime="2025-11-22 07:13:14.258891303 +0000 UTC m=+131.368345326"
Nov 22 07:13:14 crc kubenswrapper[4929]: I1122 07:13:14.946767 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:13:14 crc kubenswrapper[4929]: I1122 07:13:14.946805 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:13:14 crc kubenswrapper[4929]: I1122 07:13:14.946846 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:13:14 crc kubenswrapper[4929]: E1122 07:13:14.946884 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:13:14 crc kubenswrapper[4929]: E1122 07:13:14.947062 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:13:14 crc kubenswrapper[4929]: E1122 07:13:14.947163 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.079494 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.079533 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.079542 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.079557 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.079568 4929 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T07:13:15Z","lastTransitionTime":"2025-11-22T07:13:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.122599 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h"]
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.122974 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.125095 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.125295 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.125541 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.125975 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.274063 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dbbd6411-ee7d-4431-b1f7-623e70449715-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-bxs6h\" (UID: \"dbbd6411-ee7d-4431-b1f7-623e70449715\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.274109 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/dbbd6411-ee7d-4431-b1f7-623e70449715-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-bxs6h\" (UID: \"dbbd6411-ee7d-4431-b1f7-623e70449715\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.274137 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/dbbd6411-ee7d-4431-b1f7-623e70449715-service-ca\") pod \"cluster-version-operator-5c965bbfc6-bxs6h\" (UID: \"dbbd6411-ee7d-4431-b1f7-623e70449715\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.274275 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/dbbd6411-ee7d-4431-b1f7-623e70449715-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-bxs6h\" (UID: \"dbbd6411-ee7d-4431-b1f7-623e70449715\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.274328 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dbbd6411-ee7d-4431-b1f7-623e70449715-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-bxs6h\" (UID: \"dbbd6411-ee7d-4431-b1f7-623e70449715\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.375764 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dbbd6411-ee7d-4431-b1f7-623e70449715-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-bxs6h\" (UID: \"dbbd6411-ee7d-4431-b1f7-623e70449715\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.375813 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/dbbd6411-ee7d-4431-b1f7-623e70449715-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-bxs6h\" (UID: \"dbbd6411-ee7d-4431-b1f7-623e70449715\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.375836 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/dbbd6411-ee7d-4431-b1f7-623e70449715-service-ca\") pod \"cluster-version-operator-5c965bbfc6-bxs6h\" (UID: \"dbbd6411-ee7d-4431-b1f7-623e70449715\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.375884 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/dbbd6411-ee7d-4431-b1f7-623e70449715-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-bxs6h\" (UID: \"dbbd6411-ee7d-4431-b1f7-623e70449715\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.375909 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dbbd6411-ee7d-4431-b1f7-623e70449715-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-bxs6h\" (UID: \"dbbd6411-ee7d-4431-b1f7-623e70449715\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.376328 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/dbbd6411-ee7d-4431-b1f7-623e70449715-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-bxs6h\" (UID: \"dbbd6411-ee7d-4431-b1f7-623e70449715\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.376357 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/dbbd6411-ee7d-4431-b1f7-623e70449715-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-bxs6h\" (UID: \"dbbd6411-ee7d-4431-b1f7-623e70449715\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.377767 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/dbbd6411-ee7d-4431-b1f7-623e70449715-service-ca\") pod \"cluster-version-operator-5c965bbfc6-bxs6h\" (UID: \"dbbd6411-ee7d-4431-b1f7-623e70449715\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.381894 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dbbd6411-ee7d-4431-b1f7-623e70449715-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-bxs6h\" (UID: \"dbbd6411-ee7d-4431-b1f7-623e70449715\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.396766 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dbbd6411-ee7d-4431-b1f7-623e70449715-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-bxs6h\" (UID: \"dbbd6411-ee7d-4431-b1f7-623e70449715\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.436745 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.635427 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h" event={"ID":"dbbd6411-ee7d-4431-b1f7-623e70449715","Type":"ContainerStarted","Data":"2ff88852db53862368cea69d29ddbf8c5a9eb3cf3444d4474df99f7836b238fb"}
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.946322 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:13:15 crc kubenswrapper[4929]: E1122 07:13:15.946471 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710"
Nov 22 07:13:15 crc kubenswrapper[4929]: I1122 07:13:15.947746 4929 scope.go:117] "RemoveContainer" containerID="c2767c12d8836dca56016e73dbd1efcd722cd10f818b93ebad28312457d57ae9"
Nov 22 07:13:16 crc kubenswrapper[4929]: I1122 07:13:16.640576 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h" event={"ID":"dbbd6411-ee7d-4431-b1f7-623e70449715","Type":"ContainerStarted","Data":"95188d40e67356fbe9f19ce32e8ab5a03e94ed5346815b3bba2edee2e0794b98"}
Nov 22 07:13:16 crc kubenswrapper[4929]: I1122 07:13:16.946931 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:13:16 crc kubenswrapper[4929]: E1122 07:13:16.947145 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:13:16 crc kubenswrapper[4929]: I1122 07:13:16.947172 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:13:16 crc kubenswrapper[4929]: I1122 07:13:16.947197 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:13:16 crc kubenswrapper[4929]: E1122 07:13:16.947366 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:13:16 crc kubenswrapper[4929]: E1122 07:13:16.947514 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:13:17 crc kubenswrapper[4929]: I1122 07:13:17.645273 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vkn7m_77273e11-9bf5-4480-bf99-266ae3f1ed7a/ovnkube-controller/2.log"
Nov 22 07:13:17 crc kubenswrapper[4929]: I1122 07:13:17.648192 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerStarted","Data":"465a576a7de5c1250d5df97ff9895da8db1174cb805c4416d9702732a59ccb82"}
Nov 22 07:13:17 crc kubenswrapper[4929]: I1122 07:13:17.946708 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:13:17 crc kubenswrapper[4929]: E1122 07:13:17.946923 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710"
Nov 22 07:13:18 crc kubenswrapper[4929]: I1122 07:13:18.673432 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-bxs6h" podStartSLOduration=91.673409048 podStartE2EDuration="1m31.673409048s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:18.671860659 +0000 UTC m=+135.781314672" watchObservedRunningTime="2025-11-22 07:13:18.673409048 +0000 UTC m=+135.782863101"
Nov 22 07:13:18 crc kubenswrapper[4929]: I1122 07:13:18.947128 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:13:18 crc kubenswrapper[4929]: I1122 07:13:18.947305 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:13:18 crc kubenswrapper[4929]: E1122 07:13:18.947414 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:13:18 crc kubenswrapper[4929]: I1122 07:13:18.947134 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:13:18 crc kubenswrapper[4929]: E1122 07:13:18.947499 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:13:18 crc kubenswrapper[4929]: E1122 07:13:18.947766 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:13:19 crc kubenswrapper[4929]: E1122 07:13:19.236685 4929 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Nov 22 07:13:19 crc kubenswrapper[4929]: I1122 07:13:19.656568 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m"
Nov 22 07:13:19 crc kubenswrapper[4929]: I1122 07:13:19.687370 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" podStartSLOduration=92.687352415 podStartE2EDuration="1m32.687352415s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:19.687203451 +0000 UTC m=+136.796657554" watchObservedRunningTime="2025-11-22 07:13:19.687352415 +0000 UTC m=+136.796806418"
Nov 22 07:13:19 crc kubenswrapper[4929]: I1122 07:13:19.946715 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:13:19 crc kubenswrapper[4929]: E1122 07:13:19.946966 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710"
Nov 22 07:13:20 crc kubenswrapper[4929]: I1122 07:13:20.947063 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:13:20 crc kubenswrapper[4929]: I1122 07:13:20.947105 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:13:20 crc kubenswrapper[4929]: I1122 07:13:20.947063 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:13:20 crc kubenswrapper[4929]: E1122 07:13:20.947311 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:13:20 crc kubenswrapper[4929]: E1122 07:13:20.947423 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:13:20 crc kubenswrapper[4929]: E1122 07:13:20.947562 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:13:21 crc kubenswrapper[4929]: I1122 07:13:21.929739 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-vmdgb"]
Nov 22 07:13:21 crc kubenswrapper[4929]: I1122 07:13:21.929909 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:13:21 crc kubenswrapper[4929]: E1122 07:13:21.930052 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710"
Nov 22 07:13:22 crc kubenswrapper[4929]: I1122 07:13:22.946262 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:13:22 crc kubenswrapper[4929]: E1122 07:13:22.946718 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 07:13:22 crc kubenswrapper[4929]: I1122 07:13:22.946406 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:13:22 crc kubenswrapper[4929]: I1122 07:13:22.946482 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:13:22 crc kubenswrapper[4929]: E1122 07:13:22.946943 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 07:13:22 crc kubenswrapper[4929]: E1122 07:13:22.947071 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 07:13:23 crc kubenswrapper[4929]: I1122 07:13:23.949590 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:13:23 crc kubenswrapper[4929]: E1122 07:13:23.949681 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-vmdgb" podUID="42cb9248-6b5b-4970-8232-68883ec65710"
Nov 22 07:13:24 crc kubenswrapper[4929]: I1122 07:13:24.946678 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:13:24 crc kubenswrapper[4929]: I1122 07:13:24.946754 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 07:13:24 crc kubenswrapper[4929]: I1122 07:13:24.946753 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 07:13:24 crc kubenswrapper[4929]: I1122 07:13:24.949188 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Nov 22 07:13:24 crc kubenswrapper[4929]: I1122 07:13:24.949197 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Nov 22 07:13:24 crc kubenswrapper[4929]: I1122 07:13:24.949286 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Nov 22 07:13:24 crc kubenswrapper[4929]: I1122 07:13:24.949235 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.487507 4929 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.519540 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-hxxfj"]
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.520025 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.520628 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-hf68t"]
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.521095 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.522766 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.525735 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-nnnk9"]
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.530676 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.532776 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.534699 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.541366 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.541795 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.541889 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.542424 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.542638 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.542737 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.542798 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.542896 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.543129 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.543346 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n9lkq"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.543634 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n9lkq" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.543839 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-h8b2l"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.544196 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h8b2l" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.544827 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.545126 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.553080 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-htp4z"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.553833 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.554230 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-b7hfk"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.554636 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-mp7vd"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.554729 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-htp4z" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.555061 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.555200 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.555578 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.555786 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.555889 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.555987 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.556098 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.556225 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.556553 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.556649 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.556789 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-b7hfk" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.556815 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.557514 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-tqwhs"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.557865 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-tqwhs" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.558166 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-xfmc5"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.558459 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.558952 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.560146 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.561098 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gwj27"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.561524 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gwj27" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.562261 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-f5nrw"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.562742 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.562776 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.562876 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-2zrch"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.563069 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.563202 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.563226 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.563457 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.563560 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.563581 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.563675 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.563367 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.564366 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.563372 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.563772 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.564508 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.564615 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.564716 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.564806 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.563846 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.565024 4929 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-apiserver"/"etcd-serving-ca" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.565232 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.565233 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.565731 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.566481 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-f2px2"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.579541 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-2zrch" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.580408 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.580489 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-f2px2" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.580752 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.581086 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.581345 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-4xxhl"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.581376 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.583150 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.583436 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.581424 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.583747 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.583862 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.581557 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.581752 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.582586 4929 reflector.go:368] Caches populated 
for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.584366 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.582739 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.582933 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.582988 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.584421 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.584658 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.584808 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.584658 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.584806 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.598651 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.598952 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-hf68t"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.598982 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599134 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/a410f03e-218e-4646-9b41-17a32af9330d-node-pullsecrets\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599163 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a410f03e-218e-4646-9b41-17a32af9330d-config\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599183 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbp28\" (UniqueName: \"kubernetes.io/projected/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-kube-api-access-sbp28\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599202 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9c21569-09b7-4ba1-823a-28d373220f18-config\") pod \"machine-approver-56656f9798-h8b2l\" (UID: \"d9c21569-09b7-4ba1-823a-28d373220f18\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h8b2l" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599247 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-etcd-client\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599269 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a917968f-39da-4359-a8cc-3f3bf28e5ab6-serving-cert\") pod \"controller-manager-879f6c89f-hxxfj\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599283 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13f25e5d-8635-419c-aeef-d65724935962-config\") pod \"authentication-operator-69f744f599-hf68t\" (UID: \"13f25e5d-8635-419c-aeef-d65724935962\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599300 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/d9c21569-09b7-4ba1-823a-28d373220f18-machine-approver-tls\") pod \"machine-approver-56656f9798-h8b2l\" (UID: \"d9c21569-09b7-4ba1-823a-28d373220f18\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h8b2l" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599349 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-encryption-config\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599386 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a410f03e-218e-4646-9b41-17a32af9330d-etcd-serving-ca\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599401 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-audit-policies\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599419 4929 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599474 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csmrm\" (UniqueName: \"kubernetes.io/projected/a410f03e-218e-4646-9b41-17a32af9330d-kube-api-access-csmrm\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599483 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599495 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dt9t4\" (UniqueName: \"kubernetes.io/projected/226685e6-2207-4c2f-b5a9-6377efa0a23e-kube-api-access-dt9t4\") pod \"openshift-apiserver-operator-796bbdcf4f-n9lkq\" (UID: \"226685e6-2207-4c2f-b5a9-6377efa0a23e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n9lkq" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599555 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/226685e6-2207-4c2f-b5a9-6377efa0a23e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-n9lkq\" (UID: \"226685e6-2207-4c2f-b5a9-6377efa0a23e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n9lkq" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599601 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d9c21569-09b7-4ba1-823a-28d373220f18-auth-proxy-config\") pod \"machine-approver-56656f9798-h8b2l\" (UID: \"d9c21569-09b7-4ba1-823a-28d373220f18\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h8b2l" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599633 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/13f25e5d-8635-419c-aeef-d65724935962-serving-cert\") pod \"authentication-operator-69f744f599-hf68t\" (UID: \"13f25e5d-8635-419c-aeef-d65724935962\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599637 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-4xxhl" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599648 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-serving-cert\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599728 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a917968f-39da-4359-a8cc-3f3bf28e5ab6-config\") pod \"controller-manager-879f6c89f-hxxfj\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599746 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7x26\" (UniqueName: \"kubernetes.io/projected/a917968f-39da-4359-a8cc-3f3bf28e5ab6-kube-api-access-d7x26\") pod \"controller-manager-879f6c89f-hxxfj\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599774 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13f25e5d-8635-419c-aeef-d65724935962-service-ca-bundle\") pod \"authentication-operator-69f744f599-hf68t\" (UID: \"13f25e5d-8635-419c-aeef-d65724935962\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599790 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a917968f-39da-4359-a8cc-3f3bf28e5ab6-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-hxxfj\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599804 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/a410f03e-218e-4646-9b41-17a32af9330d-audit\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599823 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a410f03e-218e-4646-9b41-17a32af9330d-etcd-client\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599844 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a410f03e-218e-4646-9b41-17a32af9330d-serving-cert\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599860 4929 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a410f03e-218e-4646-9b41-17a32af9330d-trusted-ca-bundle\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599877 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a410f03e-218e-4646-9b41-17a32af9330d-encryption-config\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599891 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13f25e5d-8635-419c-aeef-d65724935962-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-hf68t\" (UID: \"13f25e5d-8635-419c-aeef-d65724935962\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599916 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599914 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8j8k\" (UniqueName: \"kubernetes.io/projected/13f25e5d-8635-419c-aeef-d65724935962-kube-api-access-p8j8k\") pod \"authentication-operator-69f744f599-hf68t\" (UID: \"13f25e5d-8635-419c-aeef-d65724935962\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599971 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a917968f-39da-4359-a8cc-3f3bf28e5ab6-client-ca\") pod \"controller-manager-879f6c89f-hxxfj\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599916 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-m2bg6"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.599985 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.600001 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a410f03e-218e-4646-9b41-17a32af9330d-audit-dir\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.600008 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 22 07:13:25 crc 
kubenswrapper[4929]: I1122 07:13:25.600016 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6vkd\" (UniqueName: \"kubernetes.io/projected/d9c21569-09b7-4ba1-823a-28d373220f18-kube-api-access-b6vkd\") pod \"machine-approver-56656f9798-h8b2l\" (UID: \"d9c21569-09b7-4ba1-823a-28d373220f18\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h8b2l" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.600051 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/a410f03e-218e-4646-9b41-17a32af9330d-image-import-ca\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.600066 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-audit-dir\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.600081 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/226685e6-2207-4c2f-b5a9-6377efa0a23e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-n9lkq\" (UID: \"226685e6-2207-4c2f-b5a9-6377efa0a23e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n9lkq" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.600285 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.600664 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.600721 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.600801 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.600921 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.600985 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.601092 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.601193 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.601301 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.601423 4929 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.601525 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.601550 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.601580 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.601615 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.601659 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.601702 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.601525 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.601823 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.601884 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.601933 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lsnjk"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.602279 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-nnnk9"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.607534 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-vnrtj"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.602401 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-m2bg6" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.608178 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.604811 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.608463 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.608774 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-vnrtj" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.608855 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.608906 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.605044 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.603168 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lsnjk" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.605125 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.605185 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.605354 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.605704 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.605722 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.605761 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.605784 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.605834 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.611401 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.615805 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.618098 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.618408 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.618714 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.620809 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.624153 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph"] Nov 22 07:13:25 crc kubenswrapper[4929]: 
I1122 07:13:25.624721 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.625858 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-k72qt"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.626476 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k72qt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.626477 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.628479 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.628746 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qp8xw"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.629319 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.651001 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.651372 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.656593 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k7jgs"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.657541 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k7jgs" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.659309 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-2jnpc"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.659948 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.662932 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-2jnpc" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.667280 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.669821 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-b7hfk"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.677563 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-m7pl4"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.678178 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-m7pl4" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.680377 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-wbnx6"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.680812 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wbnx6" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.683831 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.683846 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.685033 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.685712 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.687270 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9wd5b"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.688368 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-67z2d"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.688505 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9wd5b" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.689664 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4lzcw"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.690511 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4lzcw" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.690571 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-67z2d" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.690861 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.692085 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.692651 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-vp5z4"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.693260 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-vp5z4" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.695332 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-tqwhs"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.696029 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gwj27"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.696930 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-f5nrw"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.698534 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-htp4z"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.699914 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-f2px2"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.700564 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9c21569-09b7-4ba1-823a-28d373220f18-config\") pod \"machine-approver-56656f9798-h8b2l\" (UID: \"d9c21569-09b7-4ba1-823a-28d373220f18\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h8b2l" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.700602 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-etcd-client\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.700631 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lntf\" (UniqueName: \"kubernetes.io/projected/70a7e3ab-45ef-4c7a-bd14-ac4d75df2020-kube-api-access-9lntf\") pod \"packageserver-d55dfcdfc-d4gn6\" (UID: \"70a7e3ab-45ef-4c7a-bd14-ac4d75df2020\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.700654 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2646e8d-e1a9-40ef-bfe3-51135e6d767e-config\") pod \"etcd-operator-b45778765-xfmc5\" (UID: \"f2646e8d-e1a9-40ef-bfe3-51135e6d767e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.700683 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a917968f-39da-4359-a8cc-3f3bf28e5ab6-serving-cert\") pod \"controller-manager-879f6c89f-hxxfj\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.700706 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13f25e5d-8635-419c-aeef-d65724935962-config\") pod \"authentication-operator-69f744f599-hf68t\" (UID: \"13f25e5d-8635-419c-aeef-d65724935962\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t" Nov 22 07:13:25 crc 
kubenswrapper[4929]: I1122 07:13:25.700739 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8fa808f-cd52-4783-a63a-af98d9359ec2-config\") pod \"console-operator-58897d9998-b7hfk\" (UID: \"d8fa808f-cd52-4783-a63a-af98d9359ec2\") " pod="openshift-console-operator/console-operator-58897d9998-b7hfk" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.700762 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/d9c21569-09b7-4ba1-823a-28d373220f18-machine-approver-tls\") pod \"machine-approver-56656f9798-h8b2l\" (UID: \"d9c21569-09b7-4ba1-823a-28d373220f18\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h8b2l" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.700785 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c74d8b5a-3054-4132-b375-8956e231f4ac-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-m2bg6\" (UID: \"c74d8b5a-3054-4132-b375-8956e231f4ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-m2bg6" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.700809 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-encryption-config\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.700831 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrx4h\" (UniqueName: \"kubernetes.io/projected/de2ba535-d661-453d-b4cd-19c6e7628b0c-kube-api-access-rrx4h\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.700851 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/47615a4c-c032-42d8-935a-8a2fbb40c0af-profile-collector-cert\") pod \"olm-operator-6b444d44fb-nbrph\" (UID: \"47615a4c-c032-42d8-935a-8a2fbb40c0af\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.700871 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/70a7e3ab-45ef-4c7a-bd14-ac4d75df2020-webhook-cert\") pod \"packageserver-d55dfcdfc-d4gn6\" (UID: \"70a7e3ab-45ef-4c7a-bd14-ac4d75df2020\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.700892 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d8fa808f-cd52-4783-a63a-af98d9359ec2-serving-cert\") pod \"console-operator-58897d9998-b7hfk\" (UID: \"d8fa808f-cd52-4783-a63a-af98d9359ec2\") " pod="openshift-console-operator/console-operator-58897d9998-b7hfk" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.700915 4929 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a410f03e-218e-4646-9b41-17a32af9330d-etcd-serving-ca\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.700936 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-audit-policies\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.700958 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f2646e8d-e1a9-40ef-bfe3-51135e6d767e-etcd-ca\") pod \"etcd-operator-b45778765-xfmc5\" (UID: \"f2646e8d-e1a9-40ef-bfe3-51135e6d767e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.700978 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701001 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-trusted-ca-bundle\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701023 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f2646e8d-e1a9-40ef-bfe3-51135e6d767e-etcd-service-ca\") pod \"etcd-operator-b45778765-xfmc5\" (UID: \"f2646e8d-e1a9-40ef-bfe3-51135e6d767e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701049 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dt9t4\" (UniqueName: \"kubernetes.io/projected/226685e6-2207-4c2f-b5a9-6377efa0a23e-kube-api-access-dt9t4\") pod \"openshift-apiserver-operator-796bbdcf4f-n9lkq\" (UID: \"226685e6-2207-4c2f-b5a9-6377efa0a23e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n9lkq" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701071 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csmrm\" (UniqueName: \"kubernetes.io/projected/a410f03e-218e-4646-9b41-17a32af9330d-kube-api-access-csmrm\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701093 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/de2ba535-d661-453d-b4cd-19c6e7628b0c-console-oauth-config\") pod \"console-f9d7485db-tqwhs\" (UID: 
\"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701128 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/226685e6-2207-4c2f-b5a9-6377efa0a23e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-n9lkq\" (UID: \"226685e6-2207-4c2f-b5a9-6377efa0a23e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n9lkq" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701166 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c74d8b5a-3054-4132-b375-8956e231f4ac-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-m2bg6\" (UID: \"c74d8b5a-3054-4132-b375-8956e231f4ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-m2bg6" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701196 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d9c21569-09b7-4ba1-823a-28d373220f18-auth-proxy-config\") pod \"machine-approver-56656f9798-h8b2l\" (UID: \"d9c21569-09b7-4ba1-823a-28d373220f18\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h8b2l" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701272 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/70a7e3ab-45ef-4c7a-bd14-ac4d75df2020-apiservice-cert\") pod \"packageserver-d55dfcdfc-d4gn6\" (UID: \"70a7e3ab-45ef-4c7a-bd14-ac4d75df2020\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701288 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/70a7e3ab-45ef-4c7a-bd14-ac4d75df2020-tmpfs\") pod \"packageserver-d55dfcdfc-d4gn6\" (UID: \"70a7e3ab-45ef-4c7a-bd14-ac4d75df2020\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701305 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/13f25e5d-8635-419c-aeef-d65724935962-serving-cert\") pod \"authentication-operator-69f744f599-hf68t\" (UID: \"13f25e5d-8635-419c-aeef-d65724935962\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701322 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-serving-cert\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701339 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a917968f-39da-4359-a8cc-3f3bf28e5ab6-config\") pod \"controller-manager-879f6c89f-hxxfj\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" Nov 22 07:13:25 crc kubenswrapper[4929]: 
I1122 07:13:25.701354 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-service-ca\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701373 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7x26\" (UniqueName: \"kubernetes.io/projected/a917968f-39da-4359-a8cc-3f3bf28e5ab6-kube-api-access-d7x26\") pod \"controller-manager-879f6c89f-hxxfj\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701388 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d8fa808f-cd52-4783-a63a-af98d9359ec2-trusted-ca\") pod \"console-operator-58897d9998-b7hfk\" (UID: \"d8fa808f-cd52-4783-a63a-af98d9359ec2\") " pod="openshift-console-operator/console-operator-58897d9998-b7hfk" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701401 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/47615a4c-c032-42d8-935a-8a2fbb40c0af-srv-cert\") pod \"olm-operator-6b444d44fb-nbrph\" (UID: \"47615a4c-c032-42d8-935a-8a2fbb40c0af\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701425 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13f25e5d-8635-419c-aeef-d65724935962-service-ca-bundle\") pod \"authentication-operator-69f744f599-hf68t\" (UID: \"13f25e5d-8635-419c-aeef-d65724935962\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701444 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mx4kp\" (UniqueName: \"kubernetes.io/projected/f2646e8d-e1a9-40ef-bfe3-51135e6d767e-kube-api-access-mx4kp\") pod \"etcd-operator-b45778765-xfmc5\" (UID: \"f2646e8d-e1a9-40ef-bfe3-51135e6d767e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701467 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/a410f03e-218e-4646-9b41-17a32af9330d-audit\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701481 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f2646e8d-e1a9-40ef-bfe3-51135e6d767e-serving-cert\") pod \"etcd-operator-b45778765-xfmc5\" (UID: \"f2646e8d-e1a9-40ef-bfe3-51135e6d767e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701497 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/a917968f-39da-4359-a8cc-3f3bf28e5ab6-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-hxxfj\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701514 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a410f03e-218e-4646-9b41-17a32af9330d-etcd-client\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701531 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a410f03e-218e-4646-9b41-17a32af9330d-serving-cert\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701553 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a410f03e-218e-4646-9b41-17a32af9330d-trusted-ca-bundle\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701575 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a410f03e-218e-4646-9b41-17a32af9330d-encryption-config\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701598 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13f25e5d-8635-419c-aeef-d65724935962-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-hf68t\" (UID: \"13f25e5d-8635-419c-aeef-d65724935962\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701620 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a917968f-39da-4359-a8cc-3f3bf28e5ab6-client-ca\") pod \"controller-manager-879f6c89f-hxxfj\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701638 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701654 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8j8k\" (UniqueName: \"kubernetes.io/projected/13f25e5d-8635-419c-aeef-d65724935962-kube-api-access-p8j8k\") pod \"authentication-operator-69f744f599-hf68t\" (UID: \"13f25e5d-8635-419c-aeef-d65724935962\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t" Nov 22 07:13:25 crc 
kubenswrapper[4929]: I1122 07:13:25.701678 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a410f03e-218e-4646-9b41-17a32af9330d-audit-dir\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701694 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f2646e8d-e1a9-40ef-bfe3-51135e6d767e-etcd-client\") pod \"etcd-operator-b45778765-xfmc5\" (UID: \"f2646e8d-e1a9-40ef-bfe3-51135e6d767e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701711 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c74d8b5a-3054-4132-b375-8956e231f4ac-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-m2bg6\" (UID: \"c74d8b5a-3054-4132-b375-8956e231f4ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-m2bg6" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701726 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-audit-dir\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701742 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/226685e6-2207-4c2f-b5a9-6377efa0a23e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-n9lkq\" (UID: \"226685e6-2207-4c2f-b5a9-6377efa0a23e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n9lkq" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701763 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/de2ba535-d661-453d-b4cd-19c6e7628b0c-console-serving-cert\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701788 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6vkd\" (UniqueName: \"kubernetes.io/projected/d9c21569-09b7-4ba1-823a-28d373220f18-kube-api-access-b6vkd\") pod \"machine-approver-56656f9798-h8b2l\" (UID: \"d9c21569-09b7-4ba1-823a-28d373220f18\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h8b2l" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701805 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/a410f03e-218e-4646-9b41-17a32af9330d-image-import-ca\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701824 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbjwp\" (UniqueName: 
\"kubernetes.io/projected/d8fa808f-cd52-4783-a63a-af98d9359ec2-kube-api-access-jbjwp\") pod \"console-operator-58897d9998-b7hfk\" (UID: \"d8fa808f-cd52-4783-a63a-af98d9359ec2\") " pod="openshift-console-operator/console-operator-58897d9998-b7hfk" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701844 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvszk\" (UniqueName: \"kubernetes.io/projected/47615a4c-c032-42d8-935a-8a2fbb40c0af-kube-api-access-zvszk\") pod \"olm-operator-6b444d44fb-nbrph\" (UID: \"47615a4c-c032-42d8-935a-8a2fbb40c0af\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701866 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-oauth-serving-cert\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701887 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a410f03e-218e-4646-9b41-17a32af9330d-config\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701911 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbp28\" (UniqueName: \"kubernetes.io/projected/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-kube-api-access-sbp28\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701933 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-console-config\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.701959 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/a410f03e-218e-4646-9b41-17a32af9330d-node-pullsecrets\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.702049 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/a410f03e-218e-4646-9b41-17a32af9330d-node-pullsecrets\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.702589 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9c21569-09b7-4ba1-823a-28d373220f18-config\") pod \"machine-approver-56656f9798-h8b2l\" (UID: \"d9c21569-09b7-4ba1-823a-28d373220f18\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h8b2l" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 
07:13:25.705888 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n9lkq"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.706056 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.706141 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-rwvvh"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.707027 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-rwvvh" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.708472 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.708842 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a917968f-39da-4359-a8cc-3f3bf28e5ab6-serving-cert\") pod \"controller-manager-879f6c89f-hxxfj\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.709181 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13f25e5d-8635-419c-aeef-d65724935962-config\") pod \"authentication-operator-69f744f599-hf68t\" (UID: \"13f25e5d-8635-419c-aeef-d65724935962\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.709706 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/226685e6-2207-4c2f-b5a9-6377efa0a23e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-n9lkq\" (UID: \"226685e6-2207-4c2f-b5a9-6377efa0a23e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n9lkq" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.709994 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a410f03e-218e-4646-9b41-17a32af9330d-audit-dir\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.710020 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.710142 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-audit-dir\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.710790 4929 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a410f03e-218e-4646-9b41-17a32af9330d-etcd-serving-ca\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.710789 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13f25e5d-8635-419c-aeef-d65724935962-service-ca-bundle\") pod \"authentication-operator-69f744f599-hf68t\" (UID: \"13f25e5d-8635-419c-aeef-d65724935962\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.711530 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/a410f03e-218e-4646-9b41-17a32af9330d-image-import-ca\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.711545 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/a410f03e-218e-4646-9b41-17a32af9330d-audit\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.712755 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a410f03e-218e-4646-9b41-17a32af9330d-config\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.712989 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a917968f-39da-4359-a8cc-3f3bf28e5ab6-client-ca\") pod \"controller-manager-879f6c89f-hxxfj\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.713282 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a917968f-39da-4359-a8cc-3f3bf28e5ab6-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-hxxfj\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.713578 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-audit-policies\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.714118 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a410f03e-218e-4646-9b41-17a32af9330d-trusted-ca-bundle\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.714497 4929 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-etcd-client\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.714563 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/13f25e5d-8635-419c-aeef-d65724935962-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-hf68t\" (UID: \"13f25e5d-8635-419c-aeef-d65724935962\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.715723 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-encryption-config\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.715854 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-hr5lv"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.716493 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a917968f-39da-4359-a8cc-3f3bf28e5ab6-config\") pod \"controller-manager-879f6c89f-hxxfj\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.717032 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/226685e6-2207-4c2f-b5a9-6377efa0a23e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-n9lkq\" (UID: \"226685e6-2207-4c2f-b5a9-6377efa0a23e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n9lkq" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.717523 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.718173 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.718764 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a410f03e-218e-4646-9b41-17a32af9330d-etcd-client\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.718813 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-vnrtj"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.720652 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lsnjk"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.720738 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.721549 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d9c21569-09b7-4ba1-823a-28d373220f18-auth-proxy-config\") pod \"machine-approver-56656f9798-h8b2l\" (UID: \"d9c21569-09b7-4ba1-823a-28d373220f18\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h8b2l" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.721836 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a410f03e-218e-4646-9b41-17a32af9330d-serving-cert\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.722250 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a410f03e-218e-4646-9b41-17a32af9330d-encryption-config\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.722428 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-serving-cert\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.722526 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/d9c21569-09b7-4ba1-823a-28d373220f18-machine-approver-tls\") pod \"machine-approver-56656f9798-h8b2l\" (UID: \"d9c21569-09b7-4ba1-823a-28d373220f18\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h8b2l" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.722613 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.724142 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-console/downloads-7954f5f757-2zrch"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.725079 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/13f25e5d-8635-419c-aeef-d65724935962-serving-cert\") pod \"authentication-operator-69f744f599-hf68t\" (UID: \"13f25e5d-8635-419c-aeef-d65724935962\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.725704 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-k72qt"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.726923 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.728244 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-m2bg6"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.731811 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.733932 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.735422 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.737344 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-hxxfj"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.739321 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.741916 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.742402 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-mp7vd"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.744859 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-hr5lv"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.746554 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-xglp4"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.747017 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-xglp4" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.749138 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-2jnpc"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.752723 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-ljvnn"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.753719 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-ljvnn" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.757336 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-m7pl4"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.757965 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-ljvnn"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.759627 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.760701 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.761791 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-xfmc5"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.762397 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.764443 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qp8xw"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.765084 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-wbnx6"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.765991 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k7jgs"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.767731 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-4xxhl"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.768404 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9wd5b"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.769469 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-67z2d"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.770488 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-rwvvh"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.771577 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4lzcw"] Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.782607 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.802281 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.802544 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8fa808f-cd52-4783-a63a-af98d9359ec2-config\") pod \"console-operator-58897d9998-b7hfk\" (UID: \"d8fa808f-cd52-4783-a63a-af98d9359ec2\") " 
pod="openshift-console-operator/console-operator-58897d9998-b7hfk" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.802580 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c74d8b5a-3054-4132-b375-8956e231f4ac-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-m2bg6\" (UID: \"c74d8b5a-3054-4132-b375-8956e231f4ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-m2bg6" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.802600 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrx4h\" (UniqueName: \"kubernetes.io/projected/de2ba535-d661-453d-b4cd-19c6e7628b0c-kube-api-access-rrx4h\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.802623 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/47615a4c-c032-42d8-935a-8a2fbb40c0af-profile-collector-cert\") pod \"olm-operator-6b444d44fb-nbrph\" (UID: \"47615a4c-c032-42d8-935a-8a2fbb40c0af\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.802645 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d8fa808f-cd52-4783-a63a-af98d9359ec2-serving-cert\") pod \"console-operator-58897d9998-b7hfk\" (UID: \"d8fa808f-cd52-4783-a63a-af98d9359ec2\") " pod="openshift-console-operator/console-operator-58897d9998-b7hfk" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.802664 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/70a7e3ab-45ef-4c7a-bd14-ac4d75df2020-webhook-cert\") pod \"packageserver-d55dfcdfc-d4gn6\" (UID: \"70a7e3ab-45ef-4c7a-bd14-ac4d75df2020\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.802687 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f2646e8d-e1a9-40ef-bfe3-51135e6d767e-etcd-ca\") pod \"etcd-operator-b45778765-xfmc5\" (UID: \"f2646e8d-e1a9-40ef-bfe3-51135e6d767e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.802713 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-trusted-ca-bundle\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.802737 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f2646e8d-e1a9-40ef-bfe3-51135e6d767e-etcd-service-ca\") pod \"etcd-operator-b45778765-xfmc5\" (UID: \"f2646e8d-e1a9-40ef-bfe3-51135e6d767e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.802782 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: 
\"kubernetes.io/secret/de2ba535-d661-453d-b4cd-19c6e7628b0c-console-oauth-config\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.802803 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c74d8b5a-3054-4132-b375-8956e231f4ac-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-m2bg6\" (UID: \"c74d8b5a-3054-4132-b375-8956e231f4ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-m2bg6" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.802837 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/70a7e3ab-45ef-4c7a-bd14-ac4d75df2020-apiservice-cert\") pod \"packageserver-d55dfcdfc-d4gn6\" (UID: \"70a7e3ab-45ef-4c7a-bd14-ac4d75df2020\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.802861 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/70a7e3ab-45ef-4c7a-bd14-ac4d75df2020-tmpfs\") pod \"packageserver-d55dfcdfc-d4gn6\" (UID: \"70a7e3ab-45ef-4c7a-bd14-ac4d75df2020\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.802884 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-service-ca\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.802907 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/47615a4c-c032-42d8-935a-8a2fbb40c0af-srv-cert\") pod \"olm-operator-6b444d44fb-nbrph\" (UID: \"47615a4c-c032-42d8-935a-8a2fbb40c0af\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.802946 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d8fa808f-cd52-4783-a63a-af98d9359ec2-trusted-ca\") pod \"console-operator-58897d9998-b7hfk\" (UID: \"d8fa808f-cd52-4783-a63a-af98d9359ec2\") " pod="openshift-console-operator/console-operator-58897d9998-b7hfk" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.802971 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mx4kp\" (UniqueName: \"kubernetes.io/projected/f2646e8d-e1a9-40ef-bfe3-51135e6d767e-kube-api-access-mx4kp\") pod \"etcd-operator-b45778765-xfmc5\" (UID: \"f2646e8d-e1a9-40ef-bfe3-51135e6d767e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.802994 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f2646e8d-e1a9-40ef-bfe3-51135e6d767e-serving-cert\") pod \"etcd-operator-b45778765-xfmc5\" (UID: \"f2646e8d-e1a9-40ef-bfe3-51135e6d767e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5" Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 
07:13:25.803034 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f2646e8d-e1a9-40ef-bfe3-51135e6d767e-etcd-client\") pod \"etcd-operator-b45778765-xfmc5\" (UID: \"f2646e8d-e1a9-40ef-bfe3-51135e6d767e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.803058 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c74d8b5a-3054-4132-b375-8956e231f4ac-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-m2bg6\" (UID: \"c74d8b5a-3054-4132-b375-8956e231f4ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-m2bg6"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.803088 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/de2ba535-d661-453d-b4cd-19c6e7628b0c-console-serving-cert\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.803114 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbjwp\" (UniqueName: \"kubernetes.io/projected/d8fa808f-cd52-4783-a63a-af98d9359ec2-kube-api-access-jbjwp\") pod \"console-operator-58897d9998-b7hfk\" (UID: \"d8fa808f-cd52-4783-a63a-af98d9359ec2\") " pod="openshift-console-operator/console-operator-58897d9998-b7hfk"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.803138 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvszk\" (UniqueName: \"kubernetes.io/projected/47615a4c-c032-42d8-935a-8a2fbb40c0af-kube-api-access-zvszk\") pod \"olm-operator-6b444d44fb-nbrph\" (UID: \"47615a4c-c032-42d8-935a-8a2fbb40c0af\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.803164 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-oauth-serving-cert\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.803196 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-console-config\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.803235 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lntf\" (UniqueName: \"kubernetes.io/projected/70a7e3ab-45ef-4c7a-bd14-ac4d75df2020-kube-api-access-9lntf\") pod \"packageserver-d55dfcdfc-d4gn6\" (UID: \"70a7e3ab-45ef-4c7a-bd14-ac4d75df2020\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.803257 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2646e8d-e1a9-40ef-bfe3-51135e6d767e-config\") pod \"etcd-operator-b45778765-xfmc5\" (UID: \"f2646e8d-e1a9-40ef-bfe3-51135e6d767e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.803318 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8fa808f-cd52-4783-a63a-af98d9359ec2-config\") pod \"console-operator-58897d9998-b7hfk\" (UID: \"d8fa808f-cd52-4783-a63a-af98d9359ec2\") " pod="openshift-console-operator/console-operator-58897d9998-b7hfk"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.804281 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-trusted-ca-bundle\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.804416 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/f2646e8d-e1a9-40ef-bfe3-51135e6d767e-etcd-ca\") pod \"etcd-operator-b45778765-xfmc5\" (UID: \"f2646e8d-e1a9-40ef-bfe3-51135e6d767e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.804929 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/70a7e3ab-45ef-4c7a-bd14-ac4d75df2020-tmpfs\") pod \"packageserver-d55dfcdfc-d4gn6\" (UID: \"70a7e3ab-45ef-4c7a-bd14-ac4d75df2020\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.805023 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d8fa808f-cd52-4783-a63a-af98d9359ec2-trusted-ca\") pod \"console-operator-58897d9998-b7hfk\" (UID: \"d8fa808f-cd52-4783-a63a-af98d9359ec2\") " pod="openshift-console-operator/console-operator-58897d9998-b7hfk"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.805156 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-oauth-serving-cert\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.805163 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-service-ca\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.805430 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/f2646e8d-e1a9-40ef-bfe3-51135e6d767e-etcd-service-ca\") pod \"etcd-operator-b45778765-xfmc5\" (UID: \"f2646e8d-e1a9-40ef-bfe3-51135e6d767e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.805716 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-console-config\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.806113 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2646e8d-e1a9-40ef-bfe3-51135e6d767e-config\") pod \"etcd-operator-b45778765-xfmc5\" (UID: \"f2646e8d-e1a9-40ef-bfe3-51135e6d767e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.807182 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/de2ba535-d661-453d-b4cd-19c6e7628b0c-console-oauth-config\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.808276 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f2646e8d-e1a9-40ef-bfe3-51135e6d767e-etcd-client\") pod \"etcd-operator-b45778765-xfmc5\" (UID: \"f2646e8d-e1a9-40ef-bfe3-51135e6d767e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.808919 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/de2ba535-d661-453d-b4cd-19c6e7628b0c-console-serving-cert\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.809396 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d8fa808f-cd52-4783-a63a-af98d9359ec2-serving-cert\") pod \"console-operator-58897d9998-b7hfk\" (UID: \"d8fa808f-cd52-4783-a63a-af98d9359ec2\") " pod="openshift-console-operator/console-operator-58897d9998-b7hfk"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.813702 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f2646e8d-e1a9-40ef-bfe3-51135e6d767e-serving-cert\") pod \"etcd-operator-b45778765-xfmc5\" (UID: \"f2646e8d-e1a9-40ef-bfe3-51135e6d767e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.822359 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.842198 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.862426 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.882881 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.902462 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.929617 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.942580 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.946645 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.963034 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Nov 22 07:13:25 crc kubenswrapper[4929]: I1122 07:13:25.982880 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.002160 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.022754 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.042604 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.082774 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.103488 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.123201 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.127683 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c74d8b5a-3054-4132-b375-8956e231f4ac-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-m2bg6\" (UID: \"c74d8b5a-3054-4132-b375-8956e231f4ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-m2bg6"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.143084 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.144722 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c74d8b5a-3054-4132-b375-8956e231f4ac-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-m2bg6\" (UID: \"c74d8b5a-3054-4132-b375-8956e231f4ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-m2bg6"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.163203 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.183092 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.203547 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.208124 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/70a7e3ab-45ef-4c7a-bd14-ac4d75df2020-apiservice-cert\") pod \"packageserver-d55dfcdfc-d4gn6\" (UID: \"70a7e3ab-45ef-4c7a-bd14-ac4d75df2020\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.209117 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/70a7e3ab-45ef-4c7a-bd14-ac4d75df2020-webhook-cert\") pod \"packageserver-d55dfcdfc-d4gn6\" (UID: \"70a7e3ab-45ef-4c7a-bd14-ac4d75df2020\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.223287 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.242968 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.264767 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.283509 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.288669 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/47615a4c-c032-42d8-935a-8a2fbb40c0af-profile-collector-cert\") pod \"olm-operator-6b444d44fb-nbrph\" (UID: \"47615a4c-c032-42d8-935a-8a2fbb40c0af\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.302879 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.322755 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.342881 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.362507 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.383449 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.402926 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.424162 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.429631 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/47615a4c-c032-42d8-935a-8a2fbb40c0af-srv-cert\") pod \"olm-operator-6b444d44fb-nbrph\" (UID: \"47615a4c-c032-42d8-935a-8a2fbb40c0af\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.443190 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.461703 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.483021 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.503081 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.522192 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.542715 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.562429 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.588863 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.622637 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.642995 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.662638 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.680790 4929 request.go:700] Waited for 1.017324201s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-service-ca/secrets?fieldSelector=metadata.name%3Dservice-ca-dockercfg-pn86c&limit=500&resourceVersion=0
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.682406 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.702940 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.726652 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.748011 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.762657 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.782852 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.802679 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.822286 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.842924 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.862110 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.882715 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.902107 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.922447 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.943114 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.961834 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Nov 22 07:13:26 crc kubenswrapper[4929]: I1122 07:13:26.982681 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.003445 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.023481 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.043820 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.063322 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.083092 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.103130 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.122522 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.142655 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.162854 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.183089 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.203744 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.222916 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.242916 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.262567 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.282917 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.303040 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.323430 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.344132 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.362987 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.400046 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dt9t4\" (UniqueName: \"kubernetes.io/projected/226685e6-2207-4c2f-b5a9-6377efa0a23e-kube-api-access-dt9t4\") pod \"openshift-apiserver-operator-796bbdcf4f-n9lkq\" (UID: \"226685e6-2207-4c2f-b5a9-6377efa0a23e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n9lkq"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.417625 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csmrm\" (UniqueName: \"kubernetes.io/projected/a410f03e-218e-4646-9b41-17a32af9330d-kube-api-access-csmrm\") pod \"apiserver-76f77b778f-nnnk9\" (UID: \"a410f03e-218e-4646-9b41-17a32af9330d\") " pod="openshift-apiserver/apiserver-76f77b778f-nnnk9"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.419302 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n9lkq"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.437002 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7x26\" (UniqueName: \"kubernetes.io/projected/a917968f-39da-4359-a8cc-3f3bf28e5ab6-kube-api-access-d7x26\") pod \"controller-manager-879f6c89f-hxxfj\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.464333 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8j8k\" (UniqueName: \"kubernetes.io/projected/13f25e5d-8635-419c-aeef-d65724935962-kube-api-access-p8j8k\") pod \"authentication-operator-69f744f599-hf68t\" (UID: \"13f25e5d-8635-419c-aeef-d65724935962\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.476646 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6vkd\" (UniqueName: \"kubernetes.io/projected/d9c21569-09b7-4ba1-823a-28d373220f18-kube-api-access-b6vkd\") pod \"machine-approver-56656f9798-h8b2l\" (UID: \"d9c21569-09b7-4ba1-823a-28d373220f18\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h8b2l"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.501863 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbp28\" (UniqueName: \"kubernetes.io/projected/3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6-kube-api-access-sbp28\") pod \"apiserver-7bbb656c7d-wmd4n\" (UID: \"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.503000 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.524082 4929 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.543348 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.562786 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.583416 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.592612 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n9lkq"]
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.603187 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.622458 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.642777 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.651078 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.659491 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.663277 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.679375 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n9lkq" event={"ID":"226685e6-2207-4c2f-b5a9-6377efa0a23e","Type":"ContainerStarted","Data":"1711a4735f7bf366d55204beb50582f838796b6211e6c028dfabbc5ed4ad5cac"}
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.681378 4929 request.go:700] Waited for 1.927063852s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-canary/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.683332 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.699489 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-nnnk9"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.720065 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrx4h\" (UniqueName: \"kubernetes.io/projected/de2ba535-d661-453d-b4cd-19c6e7628b0c-kube-api-access-rrx4h\") pod \"console-f9d7485db-tqwhs\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " pod="openshift-console/console-f9d7485db-tqwhs"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.741899 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c74d8b5a-3054-4132-b375-8956e231f4ac-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-m2bg6\" (UID: \"c74d8b5a-3054-4132-b375-8956e231f4ac\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-m2bg6"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.746672 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h8b2l"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.746715 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.762272 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mx4kp\" (UniqueName: \"kubernetes.io/projected/f2646e8d-e1a9-40ef-bfe3-51135e6d767e-kube-api-access-mx4kp\") pod \"etcd-operator-b45778765-xfmc5\" (UID: \"f2646e8d-e1a9-40ef-bfe3-51135e6d767e\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.779329 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbjwp\" (UniqueName: \"kubernetes.io/projected/d8fa808f-cd52-4783-a63a-af98d9359ec2-kube-api-access-jbjwp\") pod \"console-operator-58897d9998-b7hfk\" (UID: \"d8fa808f-cd52-4783-a63a-af98d9359ec2\") " pod="openshift-console-operator/console-operator-58897d9998-b7hfk"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.793343 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-b7hfk"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.799155 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvszk\" (UniqueName: \"kubernetes.io/projected/47615a4c-c032-42d8-935a-8a2fbb40c0af-kube-api-access-zvszk\") pod \"olm-operator-6b444d44fb-nbrph\" (UID: \"47615a4c-c032-42d8-935a-8a2fbb40c0af\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.808965 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-tqwhs"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.818143 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lntf\" (UniqueName: \"kubernetes.io/projected/70a7e3ab-45ef-4c7a-bd14-ac4d75df2020-kube-api-access-9lntf\") pod \"packageserver-d55dfcdfc-d4gn6\" (UID: \"70a7e3ab-45ef-4c7a-bd14-ac4d75df2020\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.823552 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.840784 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.843372 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.867291 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-hxxfj"]
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.895943 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-m2bg6"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.927470 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.927532 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.927557 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.927798 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.927835 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6e050819-4ad1-4091-bc11-2c808fb19f36-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-vnrtj\" (UID: \"6e050819-4ad1-4091-bc11-2c808fb19f36\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-vnrtj"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.928150 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cfmt\" (UniqueName: \"kubernetes.io/projected/6e050819-4ad1-4091-bc11-2c808fb19f36-kube-api-access-4cfmt\") pod \"multus-admission-controller-857f4d67dd-vnrtj\" (UID: \"6e050819-4ad1-4091-bc11-2c808fb19f36\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-vnrtj"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.928178 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbmkx\" (UniqueName: \"kubernetes.io/projected/8336bed9-3f96-44a6-89a0-d6440caa4eee-kube-api-access-nbmkx\") pod \"cluster-samples-operator-665b6dd947-gwj27\" (UID: \"8336bed9-3f96-44a6-89a0-d6440caa4eee\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gwj27"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.928266 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b576b64c-589c-409a-b3b8-482e9066c45a-config\") pod \"kube-controller-manager-operator-78b949d7b-lsnjk\" (UID: \"b576b64c-589c-409a-b3b8-482e9066c45a\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lsnjk"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.928316 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3f44df6-8db0-4f77-ae46-f361e5d8948f-config\") pod \"machine-api-operator-5694c8668f-htp4z\" (UID: \"c3f44df6-8db0-4f77-ae46-f361e5d8948f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-htp4z"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.928363 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f87df65-8531-414a-83a3-b4fb6c5059f9-serving-cert\") pod \"route-controller-manager-6576b87f9c-lcg6c\" (UID: \"5f87df65-8531-414a-83a3-b4fb6c5059f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.928416 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/650a26c5-072d-4d2e-bc26-181e8a4658dc-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-f2px2\" (UID: \"650a26c5-072d-4d2e-bc26-181e8a4658dc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-f2px2"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.928503 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5b957d93-142e-42ea-b527-ab92b32b2f8c-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-5tlxw\" (UID: \"5b957d93-142e-42ea-b527-ab92b32b2f8c\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.928550 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a95c39bb-f2ae-4a5c-897f-1ac3a476c436-config-volume\") pod \"collect-profiles-29396580-z9fcm\" (UID: \"a95c39bb-f2ae-4a5c-897f-1ac3a476c436\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.928574 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f87df65-8531-414a-83a3-b4fb6c5059f9-config\") pod \"route-controller-manager-6576b87f9c-lcg6c\" (UID: \"5f87df65-8531-414a-83a3-b4fb6c5059f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.928607 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.928678 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/5b957d93-142e-42ea-b527-ab92b32b2f8c-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-5tlxw\" (UID: \"5b957d93-142e-42ea-b527-ab92b32b2f8c\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.928730 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wcrt\" (UniqueName: \"kubernetes.io/projected/d5baff14-8d60-47bf-a0a7-42a485b60a96-kube-api-access-7wcrt\") pod \"migrator-59844c95c7-k72qt\" (UID: \"d5baff14-8d60-47bf-a0a7-42a485b60a96\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k72qt"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.928753 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c3f44df6-8db0-4f77-ae46-f361e5d8948f-images\") pod \"machine-api-operator-5694c8668f-htp4z\" (UID: \"c3f44df6-8db0-4f77-ae46-f361e5d8948f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-htp4z"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.928798 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkpkg\" (UniqueName: \"kubernetes.io/projected/d6f53ed8-9185-4537-a9d0-ef9176e61bd4-kube-api-access-mkpkg\") pod \"marketplace-operator-79b997595-qp8xw\" (UID: \"d6f53ed8-9185-4537-a9d0-ef9176e61bd4\") " pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.928829 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b576b64c-589c-409a-b3b8-482e9066c45a-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-lsnjk\" (UID: \"b576b64c-589c-409a-b3b8-482e9066c45a\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lsnjk"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.928850 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bt2x\" (UniqueName: \"kubernetes.io/projected/5f87df65-8531-414a-83a3-b4fb6c5059f9-kube-api-access-6bt2x\") pod \"route-controller-manager-6576b87f9c-lcg6c\" (UID: \"5f87df65-8531-414a-83a3-b4fb6c5059f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.928904 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d6f53ed8-9185-4537-a9d0-ef9176e61bd4-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qp8xw\" (UID: \"d6f53ed8-9185-4537-a9d0-ef9176e61bd4\") " pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.928972 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6776df8d-1529-41a1-9474-d368b6631779-installation-pull-secrets\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.928995 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwrqh\" (UniqueName: \"kubernetes.io/projected/6776df8d-1529-41a1-9474-d368b6631779-kube-api-access-wwrqh\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.929099 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjzh5\" (UniqueName: \"kubernetes.io/projected/54a8c9df-7649-46eb-afdd-054fa0c1f5eb-kube-api-access-jjzh5\") pod \"downloads-7954f5f757-2zrch\" (UID: \"54a8c9df-7649-46eb-afdd-054fa0c1f5eb\") " pod="openshift-console/downloads-7954f5f757-2zrch"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.929130 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6d0965a-9351-48ba-82f0-0c1623041c7e-trusted-ca\") pod \"ingress-operator-5b745b69d9-25lrd\" (UID: \"b6d0965a-9351-48ba-82f0-0c1623041c7e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.929154 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d6f53ed8-9185-4537-a9d0-ef9176e61bd4-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qp8xw\" (UID: \"d6f53ed8-9185-4537-a9d0-ef9176e61bd4\") " pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.929195 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.929251 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6776df8d-1529-41a1-9474-d368b6631779-registry-tls\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.929272 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.929291 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5b957d93-142e-42ea-b527-ab92b32b2f8c-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-5tlxw\" (UID: \"5b957d93-142e-42ea-b527-ab92b32b2f8c\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.929329 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9hv55\" (UniqueName: \"kubernetes.io/projected/9ced1bca-93c6-442e-b0b3-aea22cfde35d-kube-api-access-9hv55\") pod \"dns-operator-744455d44c-4xxhl\" (UID: \"9ced1bca-93c6-442e-b0b3-aea22cfde35d\") " pod="openshift-dns-operator/dns-operator-744455d44c-4xxhl"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.929987 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j57v7\" (UniqueName: \"kubernetes.io/projected/75d7a4b2-62f1-4a8b-af0f-3f3efc291883-kube-api-access-j57v7\") pod \"openshift-config-operator-7777fb866f-4ww8c\" (UID: \"75d7a4b2-62f1-4a8b-af0f-3f3efc291883\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.930045 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.930069 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnrk9\" (UniqueName: \"kubernetes.io/projected/c3f44df6-8db0-4f77-ae46-f361e5d8948f-kube-api-access-hnrk9\") pod \"machine-api-operator-5694c8668f-htp4z\" (UID: \"c3f44df6-8db0-4f77-ae46-f361e5d8948f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-htp4z"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.930092 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6776df8d-1529-41a1-9474-d368b6631779-trusted-ca\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.930126 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b576b64c-589c-409a-b3b8-482e9066c45a-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-lsnjk\" (UID: \"b576b64c-589c-409a-b3b8-482e9066c45a\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lsnjk"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.930161 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krxdw\" (UniqueName: \"kubernetes.io/projected/a95c39bb-f2ae-4a5c-897f-1ac3a476c436-kube-api-access-krxdw\") pod \"collect-profiles-29396580-z9fcm\" (UID: \"a95c39bb-f2ae-4a5c-897f-1ac3a476c436\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.930186 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6776df8d-1529-41a1-9474-d368b6631779-ca-trust-extracted\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.930252 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6776df8d-1529-41a1-9474-d368b6631779-registry-certificates\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.930274 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.932162 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6776df8d-1529-41a1-9474-d368b6631779-bound-sa-token\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.932198 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9ced1bca-93c6-442e-b0b3-aea22cfde35d-metrics-tls\") pod \"dns-operator-744455d44c-4xxhl\" (UID: \"9ced1bca-93c6-442e-b0b3-aea22cfde35d\") " pod="openshift-dns-operator/dns-operator-744455d44c-4xxhl"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.932272 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9rgp\" (UniqueName: \"kubernetes.io/projected/5b957d93-142e-42ea-b527-ab92b32b2f8c-kube-api-access-p9rgp\") pod \"cluster-image-registry-operator-dc59b4c8b-5tlxw\" (UID: \"5b957d93-142e-42ea-b527-ab92b32b2f8c\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.932442 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b6d0965a-9351-48ba-82f0-0c1623041c7e-metrics-tls\") pod \"ingress-operator-5b745b69d9-25lrd\" (UID: \"b6d0965a-9351-48ba-82f0-0c1623041c7e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.932474 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-audit-policies\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.932636 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a95c39bb-f2ae-4a5c-897f-1ac3a476c436-secret-volume\") pod \"collect-profiles-29396580-z9fcm\" (UID: \"a95c39bb-f2ae-4a5c-897f-1ac3a476c436\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.932688 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c3f44df6-8db0-4f77-ae46-f361e5d8948f-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-htp4z\" (UID: \"c3f44df6-8db0-4f77-ae46-f361e5d8948f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-htp4z"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.932982 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b6d0965a-9351-48ba-82f0-0c1623041c7e-bound-sa-token\") pod \"ingress-operator-5b745b69d9-25lrd\" (UID: \"b6d0965a-9351-48ba-82f0-0c1623041c7e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.933012 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/650a26c5-072d-4d2e-bc26-181e8a4658dc-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-f2px2\" (UID: \"650a26c5-072d-4d2e-bc26-181e8a4658dc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-f2px2"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.933036 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/8336bed9-3f96-44a6-89a0-d6440caa4eee-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-gwj27\" (UID: \"8336bed9-3f96-44a6-89a0-d6440caa4eee\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gwj27"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.934604 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.934719 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f87df65-8531-414a-83a3-b4fb6c5059f9-client-ca\") pod \"route-controller-manager-6576b87f9c-lcg6c\" (UID: \"5f87df65-8531-414a-83a3-b4fb6c5059f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.934749 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/75d7a4b2-62f1-4a8b-af0f-3f3efc291883-serving-cert\") pod \"openshift-config-operator-7777fb866f-4ww8c\" (UID: \"75d7a4b2-62f1-4a8b-af0f-3f3efc291883\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.934774 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.934798 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.934821 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzmq7\" (UniqueName: \"kubernetes.io/projected/650a26c5-072d-4d2e-bc26-181e8a4658dc-kube-api-access-hzmq7\") pod \"openshift-controller-manager-operator-756b6f6bc6-f2px2\" (UID: \"650a26c5-072d-4d2e-bc26-181e8a4658dc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-f2px2"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.934846 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7j4rk\" (UniqueName: \"kubernetes.io/projected/7ba449ab-e58d-4b27-9f93-8dd3d784a077-kube-api-access-7j4rk\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.934870 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bhkv\" (UniqueName: \"kubernetes.io/projected/b6d0965a-9351-48ba-82f0-0c1623041c7e-kube-api-access-2bhkv\") pod \"ingress-operator-5b745b69d9-25lrd\" (UID: \"b6d0965a-9351-48ba-82f0-0c1623041c7e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.934890 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7ba449ab-e58d-4b27-9f93-8dd3d784a077-audit-dir\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.934987 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/75d7a4b2-62f1-4a8b-af0f-3f3efc291883-available-featuregates\") pod \"openshift-config-operator-7777fb866f-4ww8c\" (UID: \"75d7a4b2-62f1-4a8b-af0f-3f3efc291883\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.937259 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-hf68t"]
Nov 22 07:13:27 crc kubenswrapper[4929]: E1122 07:13:27.937506 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:28.437492536 +0000 UTC m=+145.546946549 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.938812 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6"
Nov 22 07:13:27 crc kubenswrapper[4929]: I1122 07:13:27.950875 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph"
Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.001484 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-nnnk9"]
Nov 22 07:13:28 crc kubenswrapper[4929]: W1122 07:13:28.016357 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda410f03e_218e_4646_9b41_17a32af9330d.slice/crio-7f5409deb26a0ad0756f877ceff39580431016cf3fbd6e16c06420a09692731f WatchSource:0}: Error finding container 7f5409deb26a0ad0756f877ceff39580431016cf3fbd6e16c06420a09692731f: Status 404 returned error can't find the container with id 7f5409deb26a0ad0756f877ceff39580431016cf3fbd6e16c06420a09692731f
Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.038770 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039018 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd"
Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039046 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f87df65-8531-414a-83a3-b4fb6c5059f9-client-ca\") pod \"route-controller-manager-6576b87f9c-lcg6c\" (UID: \"5f87df65-8531-414a-83a3-b4fb6c5059f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c"
Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039067 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/5d850ad3-bdf5-479f-9ca7-da300763391c-stats-auth\") pod \"router-default-5444994796-vp5z4\" (UID: \"5d850ad3-bdf5-479f-9ca7-da300763391c\") " pod="openshift-ingress/router-default-5444994796-vp5z4"
Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039085 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55xhb\" (UniqueName: \"kubernetes.io/projected/5d850ad3-bdf5-479f-9ca7-da300763391c-kube-api-access-55xhb\") pod \"router-default-5444994796-vp5z4\" (UID: \"5d850ad3-bdf5-479f-9ca7-da300763391c\") " pod="openshift-ingress/router-default-5444994796-vp5z4"
Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039103 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/75d7a4b2-62f1-4a8b-af0f-3f3efc291883-serving-cert\") pod \"openshift-config-operator-7777fb866f-4ww8c\" (UID: \"75d7a4b2-62f1-4a8b-af0f-3f3efc291883\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c"
Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039120 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzmq7\" (UniqueName: \"kubernetes.io/projected/650a26c5-072d-4d2e-bc26-181e8a4658dc-kube-api-access-hzmq7\") pod \"openshift-controller-manager-operator-756b6f6bc6-f2px2\" (UID: \"650a26c5-072d-4d2e-bc26-181e8a4658dc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-f2px2"
Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039136 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/70cd497c-e503-4088-ba79-c3d684026d40-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-9wd5b\" (UID: \"70cd497c-e503-4088-ba79-c3d684026d40\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9wd5b"
Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039153 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd"
Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039170 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd"
Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039191 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/68abc8a7-8068-4f85-ac04-ecdcaff7bab4-proxy-tls\") pod \"machine-config-operator-74547568cd-jdvf9\" (UID: \"68abc8a7-8068-4f85-ac04-ecdcaff7bab4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9"
Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039224 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5d850ad3-bdf5-479f-9ca7-da300763391c-service-ca-bundle\") pod \"router-default-5444994796-vp5z4\" (UID: \"5d850ad3-bdf5-479f-9ca7-da300763391c\") " pod="openshift-ingress/router-default-5444994796-vp5z4"
Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039249 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume
\"kube-api-access-7j4rk\" (UniqueName: \"kubernetes.io/projected/7ba449ab-e58d-4b27-9f93-8dd3d784a077-kube-api-access-7j4rk\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039267 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bhkv\" (UniqueName: \"kubernetes.io/projected/b6d0965a-9351-48ba-82f0-0c1623041c7e-kube-api-access-2bhkv\") pod \"ingress-operator-5b745b69d9-25lrd\" (UID: \"b6d0965a-9351-48ba-82f0-0c1623041c7e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039283 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7ba449ab-e58d-4b27-9f93-8dd3d784a077-audit-dir\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039302 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8891c9c1-14b1-4057-87d8-fed0883277c5-srv-cert\") pod \"catalog-operator-68c6474976-r9sq7\" (UID: \"8891c9c1-14b1-4057-87d8-fed0883277c5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039318 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a77d942c-13ee-4835-b773-7dbe3dc4ef76-cert\") pod \"ingress-canary-ljvnn\" (UID: \"a77d942c-13ee-4835-b773-7dbe3dc4ef76\") " pod="openshift-ingress-canary/ingress-canary-ljvnn" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039344 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/75d7a4b2-62f1-4a8b-af0f-3f3efc291883-available-featuregates\") pod \"openshift-config-operator-7777fb866f-4ww8c\" (UID: \"75d7a4b2-62f1-4a8b-af0f-3f3efc291883\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039361 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/d83571fe-a17e-49ad-a121-b58f90d914d7-registration-dir\") pod \"csi-hostpathplugin-hr5lv\" (UID: \"d83571fe-a17e-49ad-a121-b58f90d914d7\") " pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039396 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039413 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-router-certs\") pod 
\"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039429 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb-signing-cabundle\") pod \"service-ca-9c57cc56f-2jnpc\" (UID: \"ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb\") " pod="openshift-service-ca/service-ca-9c57cc56f-2jnpc" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039446 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039464 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039481 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/5d850ad3-bdf5-479f-9ca7-da300763391c-default-certificate\") pod \"router-default-5444994796-vp5z4\" (UID: \"5d850ad3-bdf5-479f-9ca7-da300763391c\") " pod="openshift-ingress/router-default-5444994796-vp5z4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039506 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6e050819-4ad1-4091-bc11-2c808fb19f36-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-vnrtj\" (UID: \"6e050819-4ad1-4091-bc11-2c808fb19f36\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-vnrtj" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039528 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cfmt\" (UniqueName: \"kubernetes.io/projected/6e050819-4ad1-4091-bc11-2c808fb19f36-kube-api-access-4cfmt\") pod \"multus-admission-controller-857f4d67dd-vnrtj\" (UID: \"6e050819-4ad1-4091-bc11-2c808fb19f36\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-vnrtj" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039543 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbmkx\" (UniqueName: \"kubernetes.io/projected/8336bed9-3f96-44a6-89a0-d6440caa4eee-kube-api-access-nbmkx\") pod \"cluster-samples-operator-665b6dd947-gwj27\" (UID: \"8336bed9-3f96-44a6-89a0-d6440caa4eee\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gwj27" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039559 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/d83571fe-a17e-49ad-a121-b58f90d914d7-plugins-dir\") pod \"csi-hostpathplugin-hr5lv\" (UID: \"d83571fe-a17e-49ad-a121-b58f90d914d7\") " 
pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039585 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b576b64c-589c-409a-b3b8-482e9066c45a-config\") pod \"kube-controller-manager-operator-78b949d7b-lsnjk\" (UID: \"b576b64c-589c-409a-b3b8-482e9066c45a\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lsnjk" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039603 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3f44df6-8db0-4f77-ae46-f361e5d8948f-config\") pod \"machine-api-operator-5694c8668f-htp4z\" (UID: \"c3f44df6-8db0-4f77-ae46-f361e5d8948f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-htp4z" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039628 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/68abc8a7-8068-4f85-ac04-ecdcaff7bab4-auth-proxy-config\") pod \"machine-config-operator-74547568cd-jdvf9\" (UID: \"68abc8a7-8068-4f85-ac04-ecdcaff7bab4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039646 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vtj6n\" (UniqueName: \"kubernetes.io/projected/964d3766-8968-4ce0-b68c-bf839937e0d3-kube-api-access-vtj6n\") pod \"control-plane-machine-set-operator-78cbb6b69f-k7jgs\" (UID: \"964d3766-8968-4ce0-b68c-bf839937e0d3\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k7jgs" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039674 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f87df65-8531-414a-83a3-b4fb6c5059f9-serving-cert\") pod \"route-controller-manager-6576b87f9c-lcg6c\" (UID: \"5f87df65-8531-414a-83a3-b4fb6c5059f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039703 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwfr8\" (UniqueName: \"kubernetes.io/projected/c4607e1c-c400-490e-96aa-3f995008f6d8-kube-api-access-nwfr8\") pod \"package-server-manager-789f6589d5-m7pl4\" (UID: \"c4607e1c-c400-490e-96aa-3f995008f6d8\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-m7pl4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039718 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70cd497c-e503-4088-ba79-c3d684026d40-config\") pod \"kube-apiserver-operator-766d6c64bb-9wd5b\" (UID: \"70cd497c-e503-4088-ba79-c3d684026d40\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9wd5b" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039737 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/650a26c5-072d-4d2e-bc26-181e8a4658dc-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-f2px2\" (UID: \"650a26c5-072d-4d2e-bc26-181e8a4658dc\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-f2px2" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039764 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a95c39bb-f2ae-4a5c-897f-1ac3a476c436-config-volume\") pod \"collect-profiles-29396580-z9fcm\" (UID: \"a95c39bb-f2ae-4a5c-897f-1ac3a476c436\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039779 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f87df65-8531-414a-83a3-b4fb6c5059f9-config\") pod \"route-controller-manager-6576b87f9c-lcg6c\" (UID: \"5f87df65-8531-414a-83a3-b4fb6c5059f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039799 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wp9j6\" (UniqueName: \"kubernetes.io/projected/79a4c973-d95f-4898-81b8-00b97f3c2aa4-kube-api-access-wp9j6\") pod \"service-ca-operator-777779d784-wbnx6\" (UID: \"79a4c973-d95f-4898-81b8-00b97f3c2aa4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wbnx6" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039823 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/964d3766-8968-4ce0-b68c-bf839937e0d3-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-k7jgs\" (UID: \"964d3766-8968-4ce0-b68c-bf839937e0d3\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k7jgs" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039845 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/70cd497c-e503-4088-ba79-c3d684026d40-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-9wd5b\" (UID: \"70cd497c-e503-4088-ba79-c3d684026d40\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9wd5b" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039871 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5b957d93-142e-42ea-b527-ab92b32b2f8c-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-5tlxw\" (UID: \"5b957d93-142e-42ea-b527-ab92b32b2f8c\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039916 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/68abc8a7-8068-4f85-ac04-ecdcaff7bab4-images\") pod \"machine-config-operator-74547568cd-jdvf9\" (UID: \"68abc8a7-8068-4f85-ac04-ecdcaff7bab4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039947 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/5b957d93-142e-42ea-b527-ab92b32b2f8c-image-registry-operator-tls\") pod 
\"cluster-image-registry-operator-dc59b4c8b-5tlxw\" (UID: \"5b957d93-142e-42ea-b527-ab92b32b2f8c\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039966 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48m4j\" (UniqueName: \"kubernetes.io/projected/3db640c6-77ea-4bb7-a250-2b82f20eb4c6-kube-api-access-48m4j\") pod \"kube-storage-version-migrator-operator-b67b599dd-4lzcw\" (UID: \"3db640c6-77ea-4bb7-a250-2b82f20eb4c6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4lzcw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039981 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb-signing-key\") pod \"service-ca-9c57cc56f-2jnpc\" (UID: \"ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb\") " pod="openshift-service-ca/service-ca-9c57cc56f-2jnpc" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.039998 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wcrt\" (UniqueName: \"kubernetes.io/projected/d5baff14-8d60-47bf-a0a7-42a485b60a96-kube-api-access-7wcrt\") pod \"migrator-59844c95c7-k72qt\" (UID: \"d5baff14-8d60-47bf-a0a7-42a485b60a96\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k72qt" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.040014 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c3f44df6-8db0-4f77-ae46-f361e5d8948f-images\") pod \"machine-api-operator-5694c8668f-htp4z\" (UID: \"c3f44df6-8db0-4f77-ae46-f361e5d8948f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-htp4z" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.040031 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkpkg\" (UniqueName: \"kubernetes.io/projected/d6f53ed8-9185-4537-a9d0-ef9176e61bd4-kube-api-access-mkpkg\") pod \"marketplace-operator-79b997595-qp8xw\" (UID: \"d6f53ed8-9185-4537-a9d0-ef9176e61bd4\") " pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.040046 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b576b64c-589c-409a-b3b8-482e9066c45a-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-lsnjk\" (UID: \"b576b64c-589c-409a-b3b8-482e9066c45a\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lsnjk" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.040061 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bt2x\" (UniqueName: \"kubernetes.io/projected/5f87df65-8531-414a-83a3-b4fb6c5059f9-kube-api-access-6bt2x\") pod \"route-controller-manager-6576b87f9c-lcg6c\" (UID: \"5f87df65-8531-414a-83a3-b4fb6c5059f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.040077 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d6f53ed8-9185-4537-a9d0-ef9176e61bd4-marketplace-trusted-ca\") pod 
\"marketplace-operator-79b997595-qp8xw\" (UID: \"d6f53ed8-9185-4537-a9d0-ef9176e61bd4\") " pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.040094 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/524eccdd-f85b-47c0-8c0a-d251ff814500-metrics-tls\") pod \"dns-default-rwvvh\" (UID: \"524eccdd-f85b-47c0-8c0a-d251ff814500\") " pod="openshift-dns/dns-default-rwvvh" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.040119 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6776df8d-1529-41a1-9474-d368b6631779-installation-pull-secrets\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.040136 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwrqh\" (UniqueName: \"kubernetes.io/projected/6776df8d-1529-41a1-9474-d368b6631779-kube-api-access-wwrqh\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: E1122 07:13:28.040335 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:28.540318948 +0000 UTC m=+145.649772961 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.041251 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/75d7a4b2-62f1-4a8b-af0f-3f3efc291883-available-featuregates\") pod \"openshift-config-operator-7777fb866f-4ww8c\" (UID: \"75d7a4b2-62f1-4a8b-af0f-3f3efc291883\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.041412 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b576b64c-589c-409a-b3b8-482e9066c45a-config\") pod \"kube-controller-manager-operator-78b949d7b-lsnjk\" (UID: \"b576b64c-589c-409a-b3b8-482e9066c45a\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lsnjk" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.041792 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.042372 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.042556 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3f44df6-8db0-4f77-ae46-f361e5d8948f-config\") pod \"machine-api-operator-5694c8668f-htp4z\" (UID: \"c3f44df6-8db0-4f77-ae46-f361e5d8948f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-htp4z" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.042564 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5b957d93-142e-42ea-b527-ab92b32b2f8c-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-5tlxw\" (UID: \"5b957d93-142e-42ea-b527-ab92b32b2f8c\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.042587 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f87df65-8531-414a-83a3-b4fb6c5059f9-client-ca\") pod \"route-controller-manager-6576b87f9c-lcg6c\" (UID: \"5f87df65-8531-414a-83a3-b4fb6c5059f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.043183 4929 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c3f44df6-8db0-4f77-ae46-f361e5d8948f-images\") pod \"machine-api-operator-5694c8668f-htp4z\" (UID: \"c3f44df6-8db0-4f77-ae46-f361e5d8948f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-htp4z" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.043783 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7ba449ab-e58d-4b27-9f93-8dd3d784a077-audit-dir\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.044242 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/650a26c5-072d-4d2e-bc26-181e8a4658dc-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-f2px2\" (UID: \"650a26c5-072d-4d2e-bc26-181e8a4658dc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-f2px2" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.044777 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d6f53ed8-9185-4537-a9d0-ef9176e61bd4-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qp8xw\" (UID: \"d6f53ed8-9185-4537-a9d0-ef9176e61bd4\") " pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.045668 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.045790 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a95c39bb-f2ae-4a5c-897f-1ac3a476c436-config-volume\") pod \"collect-profiles-29396580-z9fcm\" (UID: \"a95c39bb-f2ae-4a5c-897f-1ac3a476c436\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.046973 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f87df65-8531-414a-83a3-b4fb6c5059f9-config\") pod \"route-controller-manager-6576b87f9c-lcg6c\" (UID: \"5f87df65-8531-414a-83a3-b4fb6c5059f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.047690 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjzh5\" (UniqueName: \"kubernetes.io/projected/54a8c9df-7649-46eb-afdd-054fa0c1f5eb-kube-api-access-jjzh5\") pod \"downloads-7954f5f757-2zrch\" (UID: \"54a8c9df-7649-46eb-afdd-054fa0c1f5eb\") " pod="openshift-console/downloads-7954f5f757-2zrch" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.047847 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6d0965a-9351-48ba-82f0-0c1623041c7e-trusted-ca\") pod \"ingress-operator-5b745b69d9-25lrd\" (UID: 
\"b6d0965a-9351-48ba-82f0-0c1623041c7e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.048964 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6d0965a-9351-48ba-82f0-0c1623041c7e-trusted-ca\") pod \"ingress-operator-5b745b69d9-25lrd\" (UID: \"b6d0965a-9351-48ba-82f0-0c1623041c7e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.049689 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/75d7a4b2-62f1-4a8b-af0f-3f3efc291883-serving-cert\") pod \"openshift-config-operator-7777fb866f-4ww8c\" (UID: \"75d7a4b2-62f1-4a8b-af0f-3f3efc291883\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.049766 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050096 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050110 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d6f53ed8-9185-4537-a9d0-ef9176e61bd4-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qp8xw\" (UID: \"d6f53ed8-9185-4537-a9d0-ef9176e61bd4\") " pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050130 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050185 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwgwl\" (UniqueName: \"kubernetes.io/projected/a77d942c-13ee-4835-b773-7dbe3dc4ef76-kube-api-access-fwgwl\") pod \"ingress-canary-ljvnn\" (UID: \"a77d942c-13ee-4835-b773-7dbe3dc4ef76\") " pod="openshift-ingress-canary/ingress-canary-ljvnn" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050260 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" 
Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050326 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6776df8d-1529-41a1-9474-d368b6631779-registry-tls\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050354 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050377 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5b957d93-142e-42ea-b527-ab92b32b2f8c-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-5tlxw\" (UID: \"5b957d93-142e-42ea-b527-ab92b32b2f8c\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050400 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/6650d266-80d2-46b5-9da3-f42cee1cc658-certs\") pod \"machine-config-server-xglp4\" (UID: \"6650d266-80d2-46b5-9da3-f42cee1cc658\") " pod="openshift-machine-config-operator/machine-config-server-xglp4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050421 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbcqz\" (UniqueName: \"kubernetes.io/projected/6650d266-80d2-46b5-9da3-f42cee1cc658-kube-api-access-dbcqz\") pod \"machine-config-server-xglp4\" (UID: \"6650d266-80d2-46b5-9da3-f42cee1cc658\") " pod="openshift-machine-config-operator/machine-config-server-xglp4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050489 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9hv55\" (UniqueName: \"kubernetes.io/projected/9ced1bca-93c6-442e-b0b3-aea22cfde35d-kube-api-access-9hv55\") pod \"dns-operator-744455d44c-4xxhl\" (UID: \"9ced1bca-93c6-442e-b0b3-aea22cfde35d\") " pod="openshift-dns-operator/dns-operator-744455d44c-4xxhl" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050514 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79a4c973-d95f-4898-81b8-00b97f3c2aa4-config\") pod \"service-ca-operator-777779d784-wbnx6\" (UID: \"79a4c973-d95f-4898-81b8-00b97f3c2aa4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wbnx6" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050577 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j57v7\" (UniqueName: \"kubernetes.io/projected/75d7a4b2-62f1-4a8b-af0f-3f3efc291883-kube-api-access-j57v7\") pod \"openshift-config-operator-7777fb866f-4ww8c\" (UID: \"75d7a4b2-62f1-4a8b-af0f-3f3efc291883\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050612 4929 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050655 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/d83571fe-a17e-49ad-a121-b58f90d914d7-csi-data-dir\") pod \"csi-hostpathplugin-hr5lv\" (UID: \"d83571fe-a17e-49ad-a121-b58f90d914d7\") " pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050686 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnrk9\" (UniqueName: \"kubernetes.io/projected/c3f44df6-8db0-4f77-ae46-f361e5d8948f-kube-api-access-hnrk9\") pod \"machine-api-operator-5694c8668f-htp4z\" (UID: \"c3f44df6-8db0-4f77-ae46-f361e5d8948f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-htp4z" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050735 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6776df8d-1529-41a1-9474-d368b6631779-trusted-ca\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050774 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79a4c973-d95f-4898-81b8-00b97f3c2aa4-serving-cert\") pod \"service-ca-operator-777779d784-wbnx6\" (UID: \"79a4c973-d95f-4898-81b8-00b97f3c2aa4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wbnx6" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050799 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b576b64c-589c-409a-b3b8-482e9066c45a-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-lsnjk\" (UID: \"b576b64c-589c-409a-b3b8-482e9066c45a\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lsnjk" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050822 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qq755\" (UniqueName: \"kubernetes.io/projected/524eccdd-f85b-47c0-8c0a-d251ff814500-kube-api-access-qq755\") pod \"dns-default-rwvvh\" (UID: \"524eccdd-f85b-47c0-8c0a-d251ff814500\") " pod="openshift-dns/dns-default-rwvvh" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050849 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krxdw\" (UniqueName: \"kubernetes.io/projected/a95c39bb-f2ae-4a5c-897f-1ac3a476c436-kube-api-access-krxdw\") pod \"collect-profiles-29396580-z9fcm\" (UID: \"a95c39bb-f2ae-4a5c-897f-1ac3a476c436\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050897 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: 
\"kubernetes.io/empty-dir/6776df8d-1529-41a1-9474-d368b6631779-ca-trust-extracted\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050915 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.050929 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.051955 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a3233e89-e2e8-4172-ab0c-ff6f190f9fe7-proxy-tls\") pod \"machine-config-controller-84d6567774-67z2d\" (UID: \"a3233e89-e2e8-4172-ab0c-ff6f190f9fe7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-67z2d" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.052010 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/d83571fe-a17e-49ad-a121-b58f90d914d7-socket-dir\") pod \"csi-hostpathplugin-hr5lv\" (UID: \"d83571fe-a17e-49ad-a121-b58f90d914d7\") " pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.052068 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6776df8d-1529-41a1-9474-d368b6631779-registry-certificates\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.052101 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d6f53ed8-9185-4537-a9d0-ef9176e61bd4-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qp8xw\" (UID: \"d6f53ed8-9185-4537-a9d0-ef9176e61bd4\") " pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.052108 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3db640c6-77ea-4bb7-a250-2b82f20eb4c6-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-4lzcw\" (UID: \"3db640c6-77ea-4bb7-a250-2b82f20eb4c6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4lzcw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.052176 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: 
\"kubernetes.io/secret/6650d266-80d2-46b5-9da3-f42cee1cc658-node-bootstrap-token\") pod \"machine-config-server-xglp4\" (UID: \"6650d266-80d2-46b5-9da3-f42cee1cc658\") " pod="openshift-machine-config-operator/machine-config-server-xglp4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.053574 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6776df8d-1529-41a1-9474-d368b6631779-registry-certificates\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.054161 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdj4b\" (UniqueName: \"kubernetes.io/projected/d83571fe-a17e-49ad-a121-b58f90d914d7-kube-api-access-rdj4b\") pod \"csi-hostpathplugin-hr5lv\" (UID: \"d83571fe-a17e-49ad-a121-b58f90d914d7\") " pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.054195 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67q4x\" (UniqueName: \"kubernetes.io/projected/a3233e89-e2e8-4172-ab0c-ff6f190f9fe7-kube-api-access-67q4x\") pod \"machine-config-controller-84d6567774-67z2d\" (UID: \"a3233e89-e2e8-4172-ab0c-ff6f190f9fe7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-67z2d" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.054857 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.055081 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6776df8d-1529-41a1-9474-d368b6631779-trusted-ca\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.055091 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6776df8d-1529-41a1-9474-d368b6631779-bound-sa-token\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.055357 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f87df65-8531-414a-83a3-b4fb6c5059f9-serving-cert\") pod \"route-controller-manager-6576b87f9c-lcg6c\" (UID: \"5f87df65-8531-414a-83a3-b4fb6c5059f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.055754 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9ced1bca-93c6-442e-b0b3-aea22cfde35d-metrics-tls\") pod \"dns-operator-744455d44c-4xxhl\" (UID: \"9ced1bca-93c6-442e-b0b3-aea22cfde35d\") " 
pod="openshift-dns-operator/dns-operator-744455d44c-4xxhl" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.055839 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a3233e89-e2e8-4172-ab0c-ff6f190f9fe7-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-67z2d\" (UID: \"a3233e89-e2e8-4172-ab0c-ff6f190f9fe7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-67z2d" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.055877 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9rgp\" (UniqueName: \"kubernetes.io/projected/5b957d93-142e-42ea-b527-ab92b32b2f8c-kube-api-access-p9rgp\") pod \"cluster-image-registry-operator-dc59b4c8b-5tlxw\" (UID: \"5b957d93-142e-42ea-b527-ab92b32b2f8c\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.055912 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5d850ad3-bdf5-479f-9ca7-da300763391c-metrics-certs\") pod \"router-default-5444994796-vp5z4\" (UID: \"5d850ad3-bdf5-479f-9ca7-da300763391c\") " pod="openshift-ingress/router-default-5444994796-vp5z4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.055935 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-audit-policies\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.055957 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hc2zd\" (UniqueName: \"kubernetes.io/projected/68abc8a7-8068-4f85-ac04-ecdcaff7bab4-kube-api-access-hc2zd\") pod \"machine-config-operator-74547568cd-jdvf9\" (UID: \"68abc8a7-8068-4f85-ac04-ecdcaff7bab4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.056001 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7z7gn\" (UniqueName: \"kubernetes.io/projected/8891c9c1-14b1-4057-87d8-fed0883277c5-kube-api-access-7z7gn\") pod \"catalog-operator-68c6474976-r9sq7\" (UID: \"8891c9c1-14b1-4057-87d8-fed0883277c5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.056024 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/d83571fe-a17e-49ad-a121-b58f90d914d7-mountpoint-dir\") pod \"csi-hostpathplugin-hr5lv\" (UID: \"d83571fe-a17e-49ad-a121-b58f90d914d7\") " pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.056049 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b6d0965a-9351-48ba-82f0-0c1623041c7e-metrics-tls\") pod \"ingress-operator-5b745b69d9-25lrd\" (UID: \"b6d0965a-9351-48ba-82f0-0c1623041c7e\") " 
pod="openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.056602 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6776df8d-1529-41a1-9474-d368b6631779-installation-pull-secrets\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.057015 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c3f44df6-8db0-4f77-ae46-f361e5d8948f-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-htp4z\" (UID: \"c3f44df6-8db0-4f77-ae46-f361e5d8948f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-htp4z" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.057067 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3db640c6-77ea-4bb7-a250-2b82f20eb4c6-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-4lzcw\" (UID: \"3db640c6-77ea-4bb7-a250-2b82f20eb4c6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4lzcw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.057665 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-audit-policies\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.057800 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a95c39bb-f2ae-4a5c-897f-1ac3a476c436-secret-volume\") pod \"collect-profiles-29396580-z9fcm\" (UID: \"a95c39bb-f2ae-4a5c-897f-1ac3a476c436\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.057903 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.057965 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8891c9c1-14b1-4057-87d8-fed0883277c5-profile-collector-cert\") pod \"catalog-operator-68c6474976-r9sq7\" (UID: \"8891c9c1-14b1-4057-87d8-fed0883277c5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.057996 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/c4607e1c-c400-490e-96aa-3f995008f6d8-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-m7pl4\" (UID: \"c4607e1c-c400-490e-96aa-3f995008f6d8\") 
" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-m7pl4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.058034 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/6e050819-4ad1-4091-bc11-2c808fb19f36-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-vnrtj\" (UID: \"6e050819-4ad1-4091-bc11-2c808fb19f36\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-vnrtj" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.058055 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.058040 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mwvb\" (UniqueName: \"kubernetes.io/projected/ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb-kube-api-access-7mwvb\") pod \"service-ca-9c57cc56f-2jnpc\" (UID: \"ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb\") " pod="openshift-service-ca/service-ca-9c57cc56f-2jnpc" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.058318 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/524eccdd-f85b-47c0-8c0a-d251ff814500-config-volume\") pod \"dns-default-rwvvh\" (UID: \"524eccdd-f85b-47c0-8c0a-d251ff814500\") " pod="openshift-dns/dns-default-rwvvh" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.058407 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6776df8d-1529-41a1-9474-d368b6631779-ca-trust-extracted\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.058418 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b6d0965a-9351-48ba-82f0-0c1623041c7e-bound-sa-token\") pod \"ingress-operator-5b745b69d9-25lrd\" (UID: \"b6d0965a-9351-48ba-82f0-0c1623041c7e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.058509 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/650a26c5-072d-4d2e-bc26-181e8a4658dc-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-f2px2\" (UID: \"650a26c5-072d-4d2e-bc26-181e8a4658dc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-f2px2" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.058537 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/8336bed9-3f96-44a6-89a0-d6440caa4eee-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-gwj27\" (UID: \"8336bed9-3f96-44a6-89a0-d6440caa4eee\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gwj27" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.058904 4929 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/5b957d93-142e-42ea-b527-ab92b32b2f8c-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-5tlxw\" (UID: \"5b957d93-142e-42ea-b527-ab92b32b2f8c\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.062764 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/8336bed9-3f96-44a6-89a0-d6440caa4eee-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-gwj27\" (UID: \"8336bed9-3f96-44a6-89a0-d6440caa4eee\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gwj27" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.062800 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a95c39bb-f2ae-4a5c-897f-1ac3a476c436-secret-volume\") pod \"collect-profiles-29396580-z9fcm\" (UID: \"a95c39bb-f2ae-4a5c-897f-1ac3a476c436\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.062928 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9ced1bca-93c6-442e-b0b3-aea22cfde35d-metrics-tls\") pod \"dns-operator-744455d44c-4xxhl\" (UID: \"9ced1bca-93c6-442e-b0b3-aea22cfde35d\") " pod="openshift-dns-operator/dns-operator-744455d44c-4xxhl" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.063508 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c3f44df6-8db0-4f77-ae46-f361e5d8948f-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-htp4z\" (UID: \"c3f44df6-8db0-4f77-ae46-f361e5d8948f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-htp4z" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.063959 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b6d0965a-9351-48ba-82f0-0c1623041c7e-metrics-tls\") pod \"ingress-operator-5b745b69d9-25lrd\" (UID: \"b6d0965a-9351-48ba-82f0-0c1623041c7e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.065259 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b576b64c-589c-409a-b3b8-482e9066c45a-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-lsnjk\" (UID: \"b576b64c-589c-409a-b3b8-482e9066c45a\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lsnjk" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.065939 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.072644 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/650a26c5-072d-4d2e-bc26-181e8a4658dc-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-f2px2\" (UID: \"650a26c5-072d-4d2e-bc26-181e8a4658dc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-f2px2" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.072686 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6776df8d-1529-41a1-9474-d368b6631779-registry-tls\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.079199 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwrqh\" (UniqueName: \"kubernetes.io/projected/6776df8d-1529-41a1-9474-d368b6631779-kube-api-access-wwrqh\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.097015 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cfmt\" (UniqueName: \"kubernetes.io/projected/6e050819-4ad1-4091-bc11-2c808fb19f36-kube-api-access-4cfmt\") pod \"multus-admission-controller-857f4d67dd-vnrtj\" (UID: \"6e050819-4ad1-4091-bc11-2c808fb19f36\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-vnrtj" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.126555 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbmkx\" (UniqueName: \"kubernetes.io/projected/8336bed9-3f96-44a6-89a0-d6440caa4eee-kube-api-access-nbmkx\") pod \"cluster-samples-operator-665b6dd947-gwj27\" (UID: \"8336bed9-3f96-44a6-89a0-d6440caa4eee\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gwj27" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.138546 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkpkg\" (UniqueName: \"kubernetes.io/projected/d6f53ed8-9185-4537-a9d0-ef9176e61bd4-kube-api-access-mkpkg\") pod \"marketplace-operator-79b997595-qp8xw\" (UID: \"d6f53ed8-9185-4537-a9d0-ef9176e61bd4\") " pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.148408 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gwj27" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.161610 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3db640c6-77ea-4bb7-a250-2b82f20eb4c6-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-4lzcw\" (UID: \"3db640c6-77ea-4bb7-a250-2b82f20eb4c6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4lzcw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162039 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8891c9c1-14b1-4057-87d8-fed0883277c5-profile-collector-cert\") pod \"catalog-operator-68c6474976-r9sq7\" (UID: \"8891c9c1-14b1-4057-87d8-fed0883277c5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162058 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/c4607e1c-c400-490e-96aa-3f995008f6d8-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-m7pl4\" (UID: \"c4607e1c-c400-490e-96aa-3f995008f6d8\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-m7pl4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162084 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mwvb\" (UniqueName: \"kubernetes.io/projected/ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb-kube-api-access-7mwvb\") pod \"service-ca-9c57cc56f-2jnpc\" (UID: \"ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb\") " pod="openshift-service-ca/service-ca-9c57cc56f-2jnpc" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162107 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/524eccdd-f85b-47c0-8c0a-d251ff814500-config-volume\") pod \"dns-default-rwvvh\" (UID: \"524eccdd-f85b-47c0-8c0a-d251ff814500\") " pod="openshift-dns/dns-default-rwvvh" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162150 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/5d850ad3-bdf5-479f-9ca7-da300763391c-stats-auth\") pod \"router-default-5444994796-vp5z4\" (UID: \"5d850ad3-bdf5-479f-9ca7-da300763391c\") " pod="openshift-ingress/router-default-5444994796-vp5z4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162173 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55xhb\" (UniqueName: \"kubernetes.io/projected/5d850ad3-bdf5-479f-9ca7-da300763391c-kube-api-access-55xhb\") pod \"router-default-5444994796-vp5z4\" (UID: \"5d850ad3-bdf5-479f-9ca7-da300763391c\") " pod="openshift-ingress/router-default-5444994796-vp5z4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162275 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/70cd497c-e503-4088-ba79-c3d684026d40-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-9wd5b\" (UID: \"70cd497c-e503-4088-ba79-c3d684026d40\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9wd5b" Nov 22 07:13:28 crc 
kubenswrapper[4929]: I1122 07:13:28.162307 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/68abc8a7-8068-4f85-ac04-ecdcaff7bab4-proxy-tls\") pod \"machine-config-operator-74547568cd-jdvf9\" (UID: \"68abc8a7-8068-4f85-ac04-ecdcaff7bab4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162326 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5d850ad3-bdf5-479f-9ca7-da300763391c-service-ca-bundle\") pod \"router-default-5444994796-vp5z4\" (UID: \"5d850ad3-bdf5-479f-9ca7-da300763391c\") " pod="openshift-ingress/router-default-5444994796-vp5z4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162352 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8891c9c1-14b1-4057-87d8-fed0883277c5-srv-cert\") pod \"catalog-operator-68c6474976-r9sq7\" (UID: \"8891c9c1-14b1-4057-87d8-fed0883277c5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162373 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a77d942c-13ee-4835-b773-7dbe3dc4ef76-cert\") pod \"ingress-canary-ljvnn\" (UID: \"a77d942c-13ee-4835-b773-7dbe3dc4ef76\") " pod="openshift-ingress-canary/ingress-canary-ljvnn" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162409 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/d83571fe-a17e-49ad-a121-b58f90d914d7-registration-dir\") pod \"csi-hostpathplugin-hr5lv\" (UID: \"d83571fe-a17e-49ad-a121-b58f90d914d7\") " pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162434 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb-signing-cabundle\") pod \"service-ca-9c57cc56f-2jnpc\" (UID: \"ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb\") " pod="openshift-service-ca/service-ca-9c57cc56f-2jnpc" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162458 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/5d850ad3-bdf5-479f-9ca7-da300763391c-default-certificate\") pod \"router-default-5444994796-vp5z4\" (UID: \"5d850ad3-bdf5-479f-9ca7-da300763391c\") " pod="openshift-ingress/router-default-5444994796-vp5z4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162483 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/d83571fe-a17e-49ad-a121-b58f90d914d7-plugins-dir\") pod \"csi-hostpathplugin-hr5lv\" (UID: \"d83571fe-a17e-49ad-a121-b58f90d914d7\") " pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162515 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/68abc8a7-8068-4f85-ac04-ecdcaff7bab4-auth-proxy-config\") pod \"machine-config-operator-74547568cd-jdvf9\" (UID: \"68abc8a7-8068-4f85-ac04-ecdcaff7bab4\") " 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162539 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vtj6n\" (UniqueName: \"kubernetes.io/projected/964d3766-8968-4ce0-b68c-bf839937e0d3-kube-api-access-vtj6n\") pod \"control-plane-machine-set-operator-78cbb6b69f-k7jgs\" (UID: \"964d3766-8968-4ce0-b68c-bf839937e0d3\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k7jgs" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162569 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwfr8\" (UniqueName: \"kubernetes.io/projected/c4607e1c-c400-490e-96aa-3f995008f6d8-kube-api-access-nwfr8\") pod \"package-server-manager-789f6589d5-m7pl4\" (UID: \"c4607e1c-c400-490e-96aa-3f995008f6d8\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-m7pl4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162589 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70cd497c-e503-4088-ba79-c3d684026d40-config\") pod \"kube-apiserver-operator-766d6c64bb-9wd5b\" (UID: \"70cd497c-e503-4088-ba79-c3d684026d40\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9wd5b" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162618 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wp9j6\" (UniqueName: \"kubernetes.io/projected/79a4c973-d95f-4898-81b8-00b97f3c2aa4-kube-api-access-wp9j6\") pod \"service-ca-operator-777779d784-wbnx6\" (UID: \"79a4c973-d95f-4898-81b8-00b97f3c2aa4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wbnx6" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162650 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/964d3766-8968-4ce0-b68c-bf839937e0d3-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-k7jgs\" (UID: \"964d3766-8968-4ce0-b68c-bf839937e0d3\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k7jgs" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162675 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/70cd497c-e503-4088-ba79-c3d684026d40-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-9wd5b\" (UID: \"70cd497c-e503-4088-ba79-c3d684026d40\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9wd5b" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162706 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162732 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/68abc8a7-8068-4f85-ac04-ecdcaff7bab4-images\") pod \"machine-config-operator-74547568cd-jdvf9\" (UID: 
\"68abc8a7-8068-4f85-ac04-ecdcaff7bab4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162755 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48m4j\" (UniqueName: \"kubernetes.io/projected/3db640c6-77ea-4bb7-a250-2b82f20eb4c6-kube-api-access-48m4j\") pod \"kube-storage-version-migrator-operator-b67b599dd-4lzcw\" (UID: \"3db640c6-77ea-4bb7-a250-2b82f20eb4c6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4lzcw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162773 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb-signing-key\") pod \"service-ca-9c57cc56f-2jnpc\" (UID: \"ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb\") " pod="openshift-service-ca/service-ca-9c57cc56f-2jnpc" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162809 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/524eccdd-f85b-47c0-8c0a-d251ff814500-metrics-tls\") pod \"dns-default-rwvvh\" (UID: \"524eccdd-f85b-47c0-8c0a-d251ff814500\") " pod="openshift-dns/dns-default-rwvvh" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162871 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwgwl\" (UniqueName: \"kubernetes.io/projected/a77d942c-13ee-4835-b773-7dbe3dc4ef76-kube-api-access-fwgwl\") pod \"ingress-canary-ljvnn\" (UID: \"a77d942c-13ee-4835-b773-7dbe3dc4ef76\") " pod="openshift-ingress-canary/ingress-canary-ljvnn" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162907 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/6650d266-80d2-46b5-9da3-f42cee1cc658-certs\") pod \"machine-config-server-xglp4\" (UID: \"6650d266-80d2-46b5-9da3-f42cee1cc658\") " pod="openshift-machine-config-operator/machine-config-server-xglp4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162931 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbcqz\" (UniqueName: \"kubernetes.io/projected/6650d266-80d2-46b5-9da3-f42cee1cc658-kube-api-access-dbcqz\") pod \"machine-config-server-xglp4\" (UID: \"6650d266-80d2-46b5-9da3-f42cee1cc658\") " pod="openshift-machine-config-operator/machine-config-server-xglp4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162968 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79a4c973-d95f-4898-81b8-00b97f3c2aa4-config\") pod \"service-ca-operator-777779d784-wbnx6\" (UID: \"79a4c973-d95f-4898-81b8-00b97f3c2aa4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wbnx6" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.162992 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/d83571fe-a17e-49ad-a121-b58f90d914d7-csi-data-dir\") pod \"csi-hostpathplugin-hr5lv\" (UID: \"d83571fe-a17e-49ad-a121-b58f90d914d7\") " pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.163026 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/79a4c973-d95f-4898-81b8-00b97f3c2aa4-serving-cert\") pod \"service-ca-operator-777779d784-wbnx6\" (UID: \"79a4c973-d95f-4898-81b8-00b97f3c2aa4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wbnx6" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.163052 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qq755\" (UniqueName: \"kubernetes.io/projected/524eccdd-f85b-47c0-8c0a-d251ff814500-kube-api-access-qq755\") pod \"dns-default-rwvvh\" (UID: \"524eccdd-f85b-47c0-8c0a-d251ff814500\") " pod="openshift-dns/dns-default-rwvvh" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.163101 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a3233e89-e2e8-4172-ab0c-ff6f190f9fe7-proxy-tls\") pod \"machine-config-controller-84d6567774-67z2d\" (UID: \"a3233e89-e2e8-4172-ab0c-ff6f190f9fe7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-67z2d" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.163124 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/d83571fe-a17e-49ad-a121-b58f90d914d7-socket-dir\") pod \"csi-hostpathplugin-hr5lv\" (UID: \"d83571fe-a17e-49ad-a121-b58f90d914d7\") " pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.163169 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3db640c6-77ea-4bb7-a250-2b82f20eb4c6-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-4lzcw\" (UID: \"3db640c6-77ea-4bb7-a250-2b82f20eb4c6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4lzcw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.163192 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67q4x\" (UniqueName: \"kubernetes.io/projected/a3233e89-e2e8-4172-ab0c-ff6f190f9fe7-kube-api-access-67q4x\") pod \"machine-config-controller-84d6567774-67z2d\" (UID: \"a3233e89-e2e8-4172-ab0c-ff6f190f9fe7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-67z2d" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.163235 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/6650d266-80d2-46b5-9da3-f42cee1cc658-node-bootstrap-token\") pod \"machine-config-server-xglp4\" (UID: \"6650d266-80d2-46b5-9da3-f42cee1cc658\") " pod="openshift-machine-config-operator/machine-config-server-xglp4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.163263 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdj4b\" (UniqueName: \"kubernetes.io/projected/d83571fe-a17e-49ad-a121-b58f90d914d7-kube-api-access-rdj4b\") pod \"csi-hostpathplugin-hr5lv\" (UID: \"d83571fe-a17e-49ad-a121-b58f90d914d7\") " pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.163296 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a3233e89-e2e8-4172-ab0c-ff6f190f9fe7-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-67z2d\" (UID: 
\"a3233e89-e2e8-4172-ab0c-ff6f190f9fe7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-67z2d" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.163323 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5d850ad3-bdf5-479f-9ca7-da300763391c-metrics-certs\") pod \"router-default-5444994796-vp5z4\" (UID: \"5d850ad3-bdf5-479f-9ca7-da300763391c\") " pod="openshift-ingress/router-default-5444994796-vp5z4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.163350 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hc2zd\" (UniqueName: \"kubernetes.io/projected/68abc8a7-8068-4f85-ac04-ecdcaff7bab4-kube-api-access-hc2zd\") pod \"machine-config-operator-74547568cd-jdvf9\" (UID: \"68abc8a7-8068-4f85-ac04-ecdcaff7bab4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.163377 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5d850ad3-bdf5-479f-9ca7-da300763391c-service-ca-bundle\") pod \"router-default-5444994796-vp5z4\" (UID: \"5d850ad3-bdf5-479f-9ca7-da300763391c\") " pod="openshift-ingress/router-default-5444994796-vp5z4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.163383 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7z7gn\" (UniqueName: \"kubernetes.io/projected/8891c9c1-14b1-4057-87d8-fed0883277c5-kube-api-access-7z7gn\") pod \"catalog-operator-68c6474976-r9sq7\" (UID: \"8891c9c1-14b1-4057-87d8-fed0883277c5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.163440 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/d83571fe-a17e-49ad-a121-b58f90d914d7-mountpoint-dir\") pod \"csi-hostpathplugin-hr5lv\" (UID: \"d83571fe-a17e-49ad-a121-b58f90d914d7\") " pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.163535 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/d83571fe-a17e-49ad-a121-b58f90d914d7-mountpoint-dir\") pod \"csi-hostpathplugin-hr5lv\" (UID: \"d83571fe-a17e-49ad-a121-b58f90d914d7\") " pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.164398 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bt2x\" (UniqueName: \"kubernetes.io/projected/5f87df65-8531-414a-83a3-b4fb6c5059f9-kube-api-access-6bt2x\") pod \"route-controller-manager-6576b87f9c-lcg6c\" (UID: \"5f87df65-8531-414a-83a3-b4fb6c5059f9\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.164563 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/d83571fe-a17e-49ad-a121-b58f90d914d7-plugins-dir\") pod \"csi-hostpathplugin-hr5lv\" (UID: \"d83571fe-a17e-49ad-a121-b58f90d914d7\") " pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.164706 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/3db640c6-77ea-4bb7-a250-2b82f20eb4c6-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-4lzcw\" (UID: \"3db640c6-77ea-4bb7-a250-2b82f20eb4c6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4lzcw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.165255 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/68abc8a7-8068-4f85-ac04-ecdcaff7bab4-auth-proxy-config\") pod \"machine-config-operator-74547568cd-jdvf9\" (UID: \"68abc8a7-8068-4f85-ac04-ecdcaff7bab4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9" Nov 22 07:13:28 crc kubenswrapper[4929]: E1122 07:13:28.166345 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:28.666328656 +0000 UTC m=+145.775782749 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.166379 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/68abc8a7-8068-4f85-ac04-ecdcaff7bab4-images\") pod \"machine-config-operator-74547568cd-jdvf9\" (UID: \"68abc8a7-8068-4f85-ac04-ecdcaff7bab4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.166922 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/524eccdd-f85b-47c0-8c0a-d251ff814500-config-volume\") pod \"dns-default-rwvvh\" (UID: \"524eccdd-f85b-47c0-8c0a-d251ff814500\") " pod="openshift-dns/dns-default-rwvvh" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.166950 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70cd497c-e503-4088-ba79-c3d684026d40-config\") pod \"kube-apiserver-operator-766d6c64bb-9wd5b\" (UID: \"70cd497c-e503-4088-ba79-c3d684026d40\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9wd5b" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.167004 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/8891c9c1-14b1-4057-87d8-fed0883277c5-profile-collector-cert\") pod \"catalog-operator-68c6474976-r9sq7\" (UID: \"8891c9c1-14b1-4057-87d8-fed0883277c5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.167026 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/d83571fe-a17e-49ad-a121-b58f90d914d7-registration-dir\") pod \"csi-hostpathplugin-hr5lv\" (UID: \"d83571fe-a17e-49ad-a121-b58f90d914d7\") " 
pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.167091 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/d83571fe-a17e-49ad-a121-b58f90d914d7-socket-dir\") pod \"csi-hostpathplugin-hr5lv\" (UID: \"d83571fe-a17e-49ad-a121-b58f90d914d7\") " pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.167577 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/d83571fe-a17e-49ad-a121-b58f90d914d7-csi-data-dir\") pod \"csi-hostpathplugin-hr5lv\" (UID: \"d83571fe-a17e-49ad-a121-b58f90d914d7\") " pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.167991 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3db640c6-77ea-4bb7-a250-2b82f20eb4c6-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-4lzcw\" (UID: \"3db640c6-77ea-4bb7-a250-2b82f20eb4c6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4lzcw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.168305 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79a4c973-d95f-4898-81b8-00b97f3c2aa4-config\") pod \"service-ca-operator-777779d784-wbnx6\" (UID: \"79a4c973-d95f-4898-81b8-00b97f3c2aa4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wbnx6" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.170258 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb-signing-cabundle\") pod \"service-ca-9c57cc56f-2jnpc\" (UID: \"ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb\") " pod="openshift-service-ca/service-ca-9c57cc56f-2jnpc" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.172225 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/524eccdd-f85b-47c0-8c0a-d251ff814500-metrics-tls\") pod \"dns-default-rwvvh\" (UID: \"524eccdd-f85b-47c0-8c0a-d251ff814500\") " pod="openshift-dns/dns-default-rwvvh" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.173433 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a77d942c-13ee-4835-b773-7dbe3dc4ef76-cert\") pod \"ingress-canary-ljvnn\" (UID: \"a77d942c-13ee-4835-b773-7dbe3dc4ef76\") " pod="openshift-ingress-canary/ingress-canary-ljvnn" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.180800 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb-signing-key\") pod \"service-ca-9c57cc56f-2jnpc\" (UID: \"ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb\") " pod="openshift-service-ca/service-ca-9c57cc56f-2jnpc" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.180870 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/964d3766-8968-4ce0-b68c-bf839937e0d3-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-k7jgs\" (UID: \"964d3766-8968-4ce0-b68c-bf839937e0d3\") " 
pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k7jgs" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.185597 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/8891c9c1-14b1-4057-87d8-fed0883277c5-srv-cert\") pod \"catalog-operator-68c6474976-r9sq7\" (UID: \"8891c9c1-14b1-4057-87d8-fed0883277c5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.186020 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/5d850ad3-bdf5-479f-9ca7-da300763391c-stats-auth\") pod \"router-default-5444994796-vp5z4\" (UID: \"5d850ad3-bdf5-479f-9ca7-da300763391c\") " pod="openshift-ingress/router-default-5444994796-vp5z4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.186129 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/5d850ad3-bdf5-479f-9ca7-da300763391c-default-certificate\") pod \"router-default-5444994796-vp5z4\" (UID: \"5d850ad3-bdf5-479f-9ca7-da300763391c\") " pod="openshift-ingress/router-default-5444994796-vp5z4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.186519 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/70cd497c-e503-4088-ba79-c3d684026d40-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-9wd5b\" (UID: \"70cd497c-e503-4088-ba79-c3d684026d40\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9wd5b" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.186609 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bhkv\" (UniqueName: \"kubernetes.io/projected/b6d0965a-9351-48ba-82f0-0c1623041c7e-kube-api-access-2bhkv\") pod \"ingress-operator-5b745b69d9-25lrd\" (UID: \"b6d0965a-9351-48ba-82f0-0c1623041c7e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.186647 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/68abc8a7-8068-4f85-ac04-ecdcaff7bab4-proxy-tls\") pod \"machine-config-operator-74547568cd-jdvf9\" (UID: \"68abc8a7-8068-4f85-ac04-ecdcaff7bab4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.186991 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5d850ad3-bdf5-479f-9ca7-da300763391c-metrics-certs\") pod \"router-default-5444994796-vp5z4\" (UID: \"5d850ad3-bdf5-479f-9ca7-da300763391c\") " pod="openshift-ingress/router-default-5444994796-vp5z4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.187482 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a3233e89-e2e8-4172-ab0c-ff6f190f9fe7-proxy-tls\") pod \"machine-config-controller-84d6567774-67z2d\" (UID: \"a3233e89-e2e8-4172-ab0c-ff6f190f9fe7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-67z2d" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.188327 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/a3233e89-e2e8-4172-ab0c-ff6f190f9fe7-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-67z2d\" (UID: \"a3233e89-e2e8-4172-ab0c-ff6f190f9fe7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-67z2d" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.189265 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/6650d266-80d2-46b5-9da3-f42cee1cc658-node-bootstrap-token\") pod \"machine-config-server-xglp4\" (UID: \"6650d266-80d2-46b5-9da3-f42cee1cc658\") " pod="openshift-machine-config-operator/machine-config-server-xglp4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.189490 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79a4c973-d95f-4898-81b8-00b97f3c2aa4-serving-cert\") pod \"service-ca-operator-777779d784-wbnx6\" (UID: \"79a4c973-d95f-4898-81b8-00b97f3c2aa4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wbnx6" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.189950 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-xfmc5"] Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.191733 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/6650d266-80d2-46b5-9da3-f42cee1cc658-certs\") pod \"machine-config-server-xglp4\" (UID: \"6650d266-80d2-46b5-9da3-f42cee1cc658\") " pod="openshift-machine-config-operator/machine-config-server-xglp4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.201978 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/c4607e1c-c400-490e-96aa-3f995008f6d8-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-m7pl4\" (UID: \"c4607e1c-c400-490e-96aa-3f995008f6d8\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-m7pl4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.202836 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7j4rk\" (UniqueName: \"kubernetes.io/projected/7ba449ab-e58d-4b27-9f93-8dd3d784a077-kube-api-access-7j4rk\") pod \"oauth-openshift-558db77b4-mp7vd\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.222005 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzmq7\" (UniqueName: \"kubernetes.io/projected/650a26c5-072d-4d2e-bc26-181e8a4658dc-kube-api-access-hzmq7\") pod \"openshift-controller-manager-operator-756b6f6bc6-f2px2\" (UID: \"650a26c5-072d-4d2e-bc26-181e8a4658dc\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-f2px2" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.231791 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-vnrtj" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.244391 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wcrt\" (UniqueName: \"kubernetes.io/projected/d5baff14-8d60-47bf-a0a7-42a485b60a96-kube-api-access-7wcrt\") pod \"migrator-59844c95c7-k72qt\" (UID: \"d5baff14-8d60-47bf-a0a7-42a485b60a96\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k72qt" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.257465 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k72qt" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.264394 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:28 crc kubenswrapper[4929]: E1122 07:13:28.265249 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:28.76522781 +0000 UTC m=+145.874681823 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.270415 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.276823 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjzh5\" (UniqueName: \"kubernetes.io/projected/54a8c9df-7649-46eb-afdd-054fa0c1f5eb-kube-api-access-jjzh5\") pod \"downloads-7954f5f757-2zrch\" (UID: \"54a8c9df-7649-46eb-afdd-054fa0c1f5eb\") " pod="openshift-console/downloads-7954f5f757-2zrch" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.283130 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j57v7\" (UniqueName: \"kubernetes.io/projected/75d7a4b2-62f1-4a8b-af0f-3f3efc291883-kube-api-access-j57v7\") pod \"openshift-config-operator-7777fb866f-4ww8c\" (UID: \"75d7a4b2-62f1-4a8b-af0f-3f3efc291883\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.299922 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krxdw\" (UniqueName: \"kubernetes.io/projected/a95c39bb-f2ae-4a5c-897f-1ac3a476c436-kube-api-access-krxdw\") pod \"collect-profiles-29396580-z9fcm\" (UID: \"a95c39bb-f2ae-4a5c-897f-1ac3a476c436\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.301106 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n"] Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.304013 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-tqwhs"] Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.308780 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-b7hfk"] Nov 22 07:13:28 crc kubenswrapper[4929]: W1122 07:13:28.311930 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde2ba535_d661_453d_b4cd_19c6e7628b0c.slice/crio-722fcf86a8cdca52dffd33cc1a24d418605b36767e96a4d67c830613d14dbfd2 WatchSource:0}: Error finding container 722fcf86a8cdca52dffd33cc1a24d418605b36767e96a4d67c830613d14dbfd2: Status 404 returned error can't find the container with id 722fcf86a8cdca52dffd33cc1a24d418605b36767e96a4d67c830613d14dbfd2 Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.321403 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/5b957d93-142e-42ea-b527-ab92b32b2f8c-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-5tlxw\" (UID: \"5b957d93-142e-42ea-b527-ab92b32b2f8c\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.336961 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph"] Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.338286 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnrk9\" (UniqueName: \"kubernetes.io/projected/c3f44df6-8db0-4f77-ae46-f361e5d8948f-kube-api-access-hnrk9\") pod \"machine-api-operator-5694c8668f-htp4z\" (UID: \"c3f44df6-8db0-4f77-ae46-f361e5d8948f\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-htp4z" Nov 22 07:13:28 crc kubenswrapper[4929]: W1122 07:13:28.340160 4929 
manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b3f6540_7bc1_45b3_be62_51a9cf6ddeb6.slice/crio-7056f3844486eae85a784a095138e2f53a88238245cc95469ed7a6f7ea4cd935 WatchSource:0}: Error finding container 7056f3844486eae85a784a095138e2f53a88238245cc95469ed7a6f7ea4cd935: Status 404 returned error can't find the container with id 7056f3844486eae85a784a095138e2f53a88238245cc95469ed7a6f7ea4cd935 Nov 22 07:13:28 crc kubenswrapper[4929]: W1122 07:13:28.354975 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd8fa808f_cd52_4783_a63a_af98d9359ec2.slice/crio-eec33fe0e7a5f49ec226d4a5f7589b941a847d6dfebee474480f3e27a3fdf54a WatchSource:0}: Error finding container eec33fe0e7a5f49ec226d4a5f7589b941a847d6dfebee474480f3e27a3fdf54a: Status 404 returned error can't find the container with id eec33fe0e7a5f49ec226d4a5f7589b941a847d6dfebee474480f3e27a3fdf54a Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.360794 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-htp4z" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.366512 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: E1122 07:13:28.366866 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:28.866855331 +0000 UTC m=+145.976309344 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.367308 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6776df8d-1529-41a1-9474-d368b6631779-bound-sa-token\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.372887 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.380673 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.397422 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.406085 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9hv55\" (UniqueName: \"kubernetes.io/projected/9ced1bca-93c6-442e-b0b3-aea22cfde35d-kube-api-access-9hv55\") pod \"dns-operator-744455d44c-4xxhl\" (UID: \"9ced1bca-93c6-442e-b0b3-aea22cfde35d\") " pod="openshift-dns-operator/dns-operator-744455d44c-4xxhl" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.406320 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gwj27"] Nov 22 07:13:28 crc kubenswrapper[4929]: W1122 07:13:28.407584 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod47615a4c_c032_42d8_935a_8a2fbb40c0af.slice/crio-c1fe15f6802640442fff32c6ccf09178af65c58871ed245389a3f7a41e70531a WatchSource:0}: Error finding container c1fe15f6802640442fff32c6ccf09178af65c58871ed245389a3f7a41e70531a: Status 404 returned error can't find the container with id c1fe15f6802640442fff32c6ccf09178af65c58871ed245389a3f7a41e70531a Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.426840 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9rgp\" (UniqueName: \"kubernetes.io/projected/5b957d93-142e-42ea-b527-ab92b32b2f8c-kube-api-access-p9rgp\") pod \"cluster-image-registry-operator-dc59b4c8b-5tlxw\" (UID: \"5b957d93-142e-42ea-b527-ab92b32b2f8c\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.445884 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b576b64c-589c-409a-b3b8-482e9066c45a-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-lsnjk\" (UID: \"b576b64c-589c-409a-b3b8-482e9066c45a\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lsnjk" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.454581 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-m2bg6"] Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.463062 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b6d0965a-9351-48ba-82f0-0c1623041c7e-bound-sa-token\") pod \"ingress-operator-5b745b69d9-25lrd\" (UID: \"b6d0965a-9351-48ba-82f0-0c1623041c7e\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.467240 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6"] Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.467459 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-2zrch" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.467736 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:28 crc kubenswrapper[4929]: E1122 07:13:28.467921 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:28.967890728 +0000 UTC m=+146.077344801 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.468346 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: E1122 07:13:28.468733 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:28.968713519 +0000 UTC m=+146.078167602 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.474037 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-f2px2" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.480843 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.485460 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-4xxhl" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.487570 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7z7gn\" (UniqueName: \"kubernetes.io/projected/8891c9c1-14b1-4057-87d8-fed0883277c5-kube-api-access-7z7gn\") pod \"catalog-operator-68c6474976-r9sq7\" (UID: \"8891c9c1-14b1-4057-87d8-fed0883277c5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7" Nov 22 07:13:28 crc kubenswrapper[4929]: W1122 07:13:28.497561 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc74d8b5a_3054_4132_b375_8956e231f4ac.slice/crio-3155276dbbac8bbd5895139d65bace32a224a18272e09d867c6dcab7833d8f27 WatchSource:0}: Error finding container 3155276dbbac8bbd5895139d65bace32a224a18272e09d867c6dcab7833d8f27: Status 404 returned error can't find the container with id 3155276dbbac8bbd5895139d65bace32a224a18272e09d867c6dcab7833d8f27 Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.502190 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-vnrtj"] Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.503042 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.507369 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwfr8\" (UniqueName: \"kubernetes.io/projected/c4607e1c-c400-490e-96aa-3f995008f6d8-kube-api-access-nwfr8\") pod \"package-server-manager-789f6589d5-m7pl4\" (UID: \"c4607e1c-c400-490e-96aa-3f995008f6d8\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-m7pl4" Nov 22 07:13:28 crc kubenswrapper[4929]: W1122 07:13:28.517376 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod70a7e3ab_45ef_4c7a_bd14_ac4d75df2020.slice/crio-50a13d59cb2e50a260c71c6a478f515608c6edaabbaa6e85df7eb9be75b113a7 WatchSource:0}: Error finding container 50a13d59cb2e50a260c71c6a478f515608c6edaabbaa6e85df7eb9be75b113a7: Status 404 returned error can't find the container with id 50a13d59cb2e50a260c71c6a478f515608c6edaabbaa6e85df7eb9be75b113a7 Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.520886 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vtj6n\" (UniqueName: \"kubernetes.io/projected/964d3766-8968-4ce0-b68c-bf839937e0d3-kube-api-access-vtj6n\") pod \"control-plane-machine-set-operator-78cbb6b69f-k7jgs\" (UID: \"964d3766-8968-4ce0-b68c-bf839937e0d3\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k7jgs" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.540380 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/70cd497c-e503-4088-ba79-c3d684026d40-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-9wd5b\" (UID: \"70cd497c-e503-4088-ba79-c3d684026d40\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9wd5b" Nov 22 07:13:28 crc kubenswrapper[4929]: W1122 07:13:28.541397 4929 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6e050819_4ad1_4091_bc11_2c808fb19f36.slice/crio-7965a7920b88bcf70f1d8bf26535f16616cac396b5deb368548d2da212beab73 WatchSource:0}: Error finding container 7965a7920b88bcf70f1d8bf26535f16616cac396b5deb368548d2da212beab73: Status 404 returned error can't find the container with id 7965a7920b88bcf70f1d8bf26535f16616cac396b5deb368548d2da212beab73 Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.544193 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lsnjk" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.572562 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55xhb\" (UniqueName: \"kubernetes.io/projected/5d850ad3-bdf5-479f-9ca7-da300763391c-kube-api-access-55xhb\") pod \"router-default-5444994796-vp5z4\" (UID: \"5d850ad3-bdf5-479f-9ca7-da300763391c\") " pod="openshift-ingress/router-default-5444994796-vp5z4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.572568 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:28 crc kubenswrapper[4929]: E1122 07:13:28.572645 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:29.072625297 +0000 UTC m=+146.182079370 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.573082 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: E1122 07:13:28.573431 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:29.073418097 +0000 UTC m=+146.182872110 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.574133 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k7jgs" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.581817 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wp9j6\" (UniqueName: \"kubernetes.io/projected/79a4c973-d95f-4898-81b8-00b97f3c2aa4-kube-api-access-wp9j6\") pod \"service-ca-operator-777779d784-wbnx6\" (UID: \"79a4c973-d95f-4898-81b8-00b97f3c2aa4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wbnx6" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.588930 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-m7pl4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.595864 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mwvb\" (UniqueName: \"kubernetes.io/projected/ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb-kube-api-access-7mwvb\") pod \"service-ca-9c57cc56f-2jnpc\" (UID: \"ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb\") " pod="openshift-service-ca/service-ca-9c57cc56f-2jnpc" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.596443 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wbnx6" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.615691 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9wd5b" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.615791 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.618033 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48m4j\" (UniqueName: \"kubernetes.io/projected/3db640c6-77ea-4bb7-a250-2b82f20eb4c6-kube-api-access-48m4j\") pod \"kube-storage-version-migrator-operator-b67b599dd-4lzcw\" (UID: \"3db640c6-77ea-4bb7-a250-2b82f20eb4c6\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4lzcw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.625055 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4lzcw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.639250 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.642976 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdj4b\" (UniqueName: \"kubernetes.io/projected/d83571fe-a17e-49ad-a121-b58f90d914d7-kube-api-access-rdj4b\") pod \"csi-hostpathplugin-hr5lv\" (UID: \"d83571fe-a17e-49ad-a121-b58f90d914d7\") " pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.644700 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-vp5z4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.662635 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbcqz\" (UniqueName: \"kubernetes.io/projected/6650d266-80d2-46b5-9da3-f42cee1cc658-kube-api-access-dbcqz\") pod \"machine-config-server-xglp4\" (UID: \"6650d266-80d2-46b5-9da3-f42cee1cc658\") " pod="openshift-machine-config-operator/machine-config-server-xglp4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.673847 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:28 crc kubenswrapper[4929]: E1122 07:13:28.674404 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:29.174386472 +0000 UTC m=+146.283840485 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.682953 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67q4x\" (UniqueName: \"kubernetes.io/projected/a3233e89-e2e8-4172-ab0c-ff6f190f9fe7-kube-api-access-67q4x\") pod \"machine-config-controller-84d6567774-67z2d\" (UID: \"a3233e89-e2e8-4172-ab0c-ff6f190f9fe7\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-67z2d" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.685687 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.690119 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6" event={"ID":"70a7e3ab-45ef-4c7a-bd14-ac4d75df2020","Type":"ContainerStarted","Data":"50a13d59cb2e50a260c71c6a478f515608c6edaabbaa6e85df7eb9be75b113a7"} Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.692441 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t" event={"ID":"13f25e5d-8635-419c-aeef-d65724935962","Type":"ContainerStarted","Data":"ec2df4373ffa7a4222d06c230bf757055d4b56069c0e3545bf346b8a584af030"} Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.692475 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t" event={"ID":"13f25e5d-8635-419c-aeef-d65724935962","Type":"ContainerStarted","Data":"defe7a10ed05f20f10c005173cde0ee5d64b976273f0e19b490aa82d16d41c03"} Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.694234 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-xglp4" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.698122 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qq755\" (UniqueName: \"kubernetes.io/projected/524eccdd-f85b-47c0-8c0a-d251ff814500-kube-api-access-qq755\") pod \"dns-default-rwvvh\" (UID: \"524eccdd-f85b-47c0-8c0a-d251ff814500\") " pod="openshift-dns/dns-default-rwvvh" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.703569 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-m2bg6" event={"ID":"c74d8b5a-3054-4132-b375-8956e231f4ac","Type":"ContainerStarted","Data":"3155276dbbac8bbd5895139d65bace32a224a18272e09d867c6dcab7833d8f27"} Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.707623 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-tqwhs" event={"ID":"de2ba535-d661-453d-b4cd-19c6e7628b0c","Type":"ContainerStarted","Data":"722fcf86a8cdca52dffd33cc1a24d418605b36767e96a4d67c830613d14dbfd2"} Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.712877 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" event={"ID":"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6","Type":"ContainerStarted","Data":"7056f3844486eae85a784a095138e2f53a88238245cc95469ed7a6f7ea4cd935"} Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.717082 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-vnrtj" event={"ID":"6e050819-4ad1-4091-bc11-2c808fb19f36","Type":"ContainerStarted","Data":"7965a7920b88bcf70f1d8bf26535f16616cac396b5deb368548d2da212beab73"} Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.717367 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c"] Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.718470 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" 
event={"ID":"a410f03e-218e-4646-9b41-17a32af9330d","Type":"ContainerStarted","Data":"7f5409deb26a0ad0756f877ceff39580431016cf3fbd6e16c06420a09692731f"} Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.719887 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n9lkq" event={"ID":"226685e6-2207-4c2f-b5a9-6377efa0a23e","Type":"ContainerStarted","Data":"123bd5bf81f32c72bca79cd86e19621ed42b4d942c8f4d90214c866f2c15c470"} Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.722940 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph" event={"ID":"47615a4c-c032-42d8-935a-8a2fbb40c0af","Type":"ContainerStarted","Data":"c1fe15f6802640442fff32c6ccf09178af65c58871ed245389a3f7a41e70531a"} Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.724309 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h8b2l" event={"ID":"d9c21569-09b7-4ba1-823a-28d373220f18","Type":"ContainerStarted","Data":"afd3252c770e4a71356a0dda7cf794a7ede64f0f18fc7c65547a34c0e29a2ff2"} Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.724332 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h8b2l" event={"ID":"d9c21569-09b7-4ba1-823a-28d373220f18","Type":"ContainerStarted","Data":"f7ce508953d3ce4eafa400898a6a7c76e4aa1ec293d84ce7cf71bdbd4ae18d8b"} Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.724529 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwgwl\" (UniqueName: \"kubernetes.io/projected/a77d942c-13ee-4835-b773-7dbe3dc4ef76-kube-api-access-fwgwl\") pod \"ingress-canary-ljvnn\" (UID: \"a77d942c-13ee-4835-b773-7dbe3dc4ef76\") " pod="openshift-ingress-canary/ingress-canary-ljvnn" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.736967 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5" event={"ID":"f2646e8d-e1a9-40ef-bfe3-51135e6d767e","Type":"ContainerStarted","Data":"51415f3d2011f2c2d2e43d0b46bed092e22af452d284a0993a04a2829678abf7"} Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.739327 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hc2zd\" (UniqueName: \"kubernetes.io/projected/68abc8a7-8068-4f85-ac04-ecdcaff7bab4-kube-api-access-hc2zd\") pod \"machine-config-operator-74547568cd-jdvf9\" (UID: \"68abc8a7-8068-4f85-ac04-ecdcaff7bab4\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.750854 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" event={"ID":"a917968f-39da-4359-a8cc-3f3bf28e5ab6","Type":"ContainerStarted","Data":"60be21b1a40acd44b688d3a9dd22c032d3408875ffe72c6aa9b2b31f90e40903"} Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.750919 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" event={"ID":"a917968f-39da-4359-a8cc-3f3bf28e5ab6","Type":"ContainerStarted","Data":"ee19b405e8fb1e4f934f6e8801f9a1a35f5283378399c87fc1e0b52632cc9b7d"} Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.751338 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.755967 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-b7hfk" event={"ID":"d8fa808f-cd52-4783-a63a-af98d9359ec2","Type":"ContainerStarted","Data":"eec33fe0e7a5f49ec226d4a5f7589b941a847d6dfebee474480f3e27a3fdf54a"} Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.769455 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qp8xw"] Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.776375 4929 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-hxxfj container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.776521 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" podUID="a917968f-39da-4359-a8cc-3f3bf28e5ab6" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.777682 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.777967 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-k72qt"] Nov 22 07:13:28 crc kubenswrapper[4929]: E1122 07:13:28.778105 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:29.278090786 +0000 UTC m=+146.387544799 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.786609 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c"] Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.878738 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:28 crc kubenswrapper[4929]: E1122 07:13:28.892829 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:29.392804243 +0000 UTC m=+146.502258256 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.893036 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:28 crc kubenswrapper[4929]: E1122 07:13:28.897020 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:29.396941846 +0000 UTC m=+146.506395869 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.897864 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-2jnpc" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.902200 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-htp4z"] Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.906162 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.930141 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-67z2d" Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.949846 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-mp7vd"] Nov 22 07:13:28 crc kubenswrapper[4929]: I1122 07:13:28.954736 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-rwvvh" Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:28.995274 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:29 crc kubenswrapper[4929]: E1122 07:13:28.995546 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:29.495530072 +0000 UTC m=+146.604984085 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.002241 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-ljvnn" Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.003483 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-2zrch"] Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.045572 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd"] Nov 22 07:13:29 crc kubenswrapper[4929]: W1122 07:13:29.071517 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7ba449ab_e58d_4b27_9f93_8dd3d784a077.slice/crio-f9a7df6ef31f81fd4ac409d18e4c80f1ab69dceee44a1018292881fcb1db595c WatchSource:0}: Error finding container f9a7df6ef31f81fd4ac409d18e4c80f1ab69dceee44a1018292881fcb1db595c: Status 404 returned error can't find the container with id f9a7df6ef31f81fd4ac409d18e4c80f1ab69dceee44a1018292881fcb1db595c Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.078004 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-4xxhl"] Nov 22 07:13:29 crc kubenswrapper[4929]: W1122 07:13:29.082846 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod75d7a4b2_62f1_4a8b_af0f_3f3efc291883.slice/crio-24fb8efaf9f20aeacb482fc320c7de6016fdd1a793eab9961c78504f1ae6de45 WatchSource:0}: Error finding container 24fb8efaf9f20aeacb482fc320c7de6016fdd1a793eab9961c78504f1ae6de45: Status 404 returned error can't find the container with id 24fb8efaf9f20aeacb482fc320c7de6016fdd1a793eab9961c78504f1ae6de45 Nov 22 07:13:29 crc kubenswrapper[4929]: W1122 07:13:29.095403 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc3f44df6_8db0_4f77_ae46_f361e5d8948f.slice/crio-ecc1d3739a8422d5e0317b4787c2ab87b749182b2f281c129336d692cab0a96b WatchSource:0}: Error finding container ecc1d3739a8422d5e0317b4787c2ab87b749182b2f281c129336d692cab0a96b: Status 404 returned error can't find the container with id ecc1d3739a8422d5e0317b4787c2ab87b749182b2f281c129336d692cab0a96b Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.096082 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:29 crc kubenswrapper[4929]: E1122 07:13:29.096498 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:29.596482877 +0000 UTC m=+146.705936900 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:29 crc kubenswrapper[4929]: W1122 07:13:29.112947 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod54a8c9df_7649_46eb_afdd_054fa0c1f5eb.slice/crio-c2d4a1039fdde5c26932ed95db2be12ac43fe0a8f528e64d1a08510975a7c13e WatchSource:0}: Error finding container c2d4a1039fdde5c26932ed95db2be12ac43fe0a8f528e64d1a08510975a7c13e: Status 404 returned error can't find the container with id c2d4a1039fdde5c26932ed95db2be12ac43fe0a8f528e64d1a08510975a7c13e Nov 22 07:13:29 crc kubenswrapper[4929]: W1122 07:13:29.113908 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb6d0965a_9351_48ba_82f0_0c1623041c7e.slice/crio-9cd7b6c5a7c1f425cc6f03713b34dc8b2413c5c08d897a5d4f135b8bb4bf2fbe WatchSource:0}: Error finding container 9cd7b6c5a7c1f425cc6f03713b34dc8b2413c5c08d897a5d4f135b8bb4bf2fbe: Status 404 returned error can't find the container with id 9cd7b6c5a7c1f425cc6f03713b34dc8b2413c5c08d897a5d4f135b8bb4bf2fbe Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.129958 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-f2px2"] Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.175026 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm"] Nov 22 07:13:29 crc kubenswrapper[4929]: W1122 07:13:29.183660 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9ced1bca_93c6_442e_b0b3_aea22cfde35d.slice/crio-e6f03e5a04828cb2c72d636fe6c4b4352a5e33bdbe3f92677eb21ed30c4d3e86 WatchSource:0}: Error finding container e6f03e5a04828cb2c72d636fe6c4b4352a5e33bdbe3f92677eb21ed30c4d3e86: Status 404 returned error can't find the container with id e6f03e5a04828cb2c72d636fe6c4b4352a5e33bdbe3f92677eb21ed30c4d3e86 Nov 22 07:13:29 crc kubenswrapper[4929]: W1122 07:13:29.186386 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod650a26c5_072d_4d2e_bc26_181e8a4658dc.slice/crio-afd900a822c7baaf5dc3b1f2e9ae273a0f7c07dd95bbf67c63dbbf861f7b3885 WatchSource:0}: Error finding container afd900a822c7baaf5dc3b1f2e9ae273a0f7c07dd95bbf67c63dbbf861f7b3885: Status 404 returned error can't find the container with id afd900a822c7baaf5dc3b1f2e9ae273a0f7c07dd95bbf67c63dbbf861f7b3885 Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.196719 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:29 crc kubenswrapper[4929]: E1122 07:13:29.197171 4929 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:29.697150004 +0000 UTC m=+146.806604017 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.297975 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:29 crc kubenswrapper[4929]: E1122 07:13:29.298484 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:29.798468708 +0000 UTC m=+146.907922721 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.361425 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-m7pl4"] Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.398682 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:29 crc kubenswrapper[4929]: E1122 07:13:29.398772 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:29.898750076 +0000 UTC m=+147.008204089 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.399051 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:29 crc kubenswrapper[4929]: E1122 07:13:29.399383 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:29.899372672 +0000 UTC m=+147.008826745 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.433551 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-hf68t" podStartSLOduration=102.433536033 podStartE2EDuration="1m42.433536033s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:29.431759549 +0000 UTC m=+146.541213562" watchObservedRunningTime="2025-11-22 07:13:29.433536033 +0000 UTC m=+146.542990046" Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.500563 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:29 crc kubenswrapper[4929]: E1122 07:13:29.511951 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:30.011927556 +0000 UTC m=+147.121381569 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.518782 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:29 crc kubenswrapper[4929]: E1122 07:13:29.519332 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:30.019316 +0000 UTC m=+147.128770013 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.627339 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:29 crc kubenswrapper[4929]: E1122 07:13:29.628276 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:30.128180942 +0000 UTC m=+147.237634955 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.645820 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-wbnx6"] Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.736715 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:29 crc kubenswrapper[4929]: E1122 07:13:29.737070 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:30.237045273 +0000 UTC m=+147.346499276 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.765615 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c" event={"ID":"75d7a4b2-62f1-4a8b-af0f-3f3efc291883","Type":"ContainerStarted","Data":"24fb8efaf9f20aeacb482fc320c7de6016fdd1a793eab9961c78504f1ae6de45"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.770680 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-f2px2" event={"ID":"650a26c5-072d-4d2e-bc26-181e8a4658dc","Type":"ContainerStarted","Data":"afd900a822c7baaf5dc3b1f2e9ae273a0f7c07dd95bbf67c63dbbf861f7b3885"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.801591 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw" event={"ID":"d6f53ed8-9185-4537-a9d0-ef9176e61bd4","Type":"ContainerStarted","Data":"c7b7a36c6f439687fef254716288fb4ecd8aea0cc79b8bc1d4f2ccd88c7bc903"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.810131 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" event={"ID":"7ba449ab-e58d-4b27-9f93-8dd3d784a077","Type":"ContainerStarted","Data":"f9a7df6ef31f81fd4ac409d18e4c80f1ab69dceee44a1018292881fcb1db595c"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.837439 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lsnjk"] Nov 22 07:13:29 crc kubenswrapper[4929]: 
I1122 07:13:29.837843 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:29 crc kubenswrapper[4929]: E1122 07:13:29.838284 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:30.338256985 +0000 UTC m=+147.447710998 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.842430 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw"] Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.844754 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h8b2l" event={"ID":"d9c21569-09b7-4ba1-823a-28d373220f18","Type":"ContainerStarted","Data":"b22cf6bb3f247d73ad6b60c3cc7eecb4f72ee5e599e4b633c888a409ae262c1a"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.870464 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd" event={"ID":"b6d0965a-9351-48ba-82f0-0c1623041c7e","Type":"ContainerStarted","Data":"9cd7b6c5a7c1f425cc6f03713b34dc8b2413c5c08d897a5d4f135b8bb4bf2fbe"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.873697 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4lzcw"] Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.879048 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-2zrch" event={"ID":"54a8c9df-7649-46eb-afdd-054fa0c1f5eb","Type":"ContainerStarted","Data":"c2d4a1039fdde5c26932ed95db2be12ac43fe0a8f528e64d1a08510975a7c13e"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.882901 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6" event={"ID":"70a7e3ab-45ef-4c7a-bd14-ac4d75df2020","Type":"ContainerStarted","Data":"b59317e733a92a9bfdeb39e3e2691366ac28a8ed8f2460ae8f06a96cc3783489"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.883594 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6" Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.887685 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7"] Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.891003 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k72qt" event={"ID":"d5baff14-8d60-47bf-a0a7-42a485b60a96","Type":"ContainerStarted","Data":"5e77b580c5f12265fade999c85389db7335fd76f6c1d91667d957aaa76dbfec5"} Nov 22 07:13:29 crc kubenswrapper[4929]: W1122 07:13:29.889682 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb576b64c_589c_409a_b3b8_482e9066c45a.slice/crio-41e82c1ad22f578fbe5fea5bc85f88bc5ff66698285680ccb1fddb8dc444ff2f WatchSource:0}: Error finding container 41e82c1ad22f578fbe5fea5bc85f88bc5ff66698285680ccb1fddb8dc444ff2f: Status 404 returned error can't find the container with id 41e82c1ad22f578fbe5fea5bc85f88bc5ff66698285680ccb1fddb8dc444ff2f Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.895326 4929 generic.go:334] "Generic (PLEG): container finished" podID="3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6" containerID="d9ac72dedd323fc7b4ddd58de2c624c8e507d99f1831a9d86afd30cb4339c684" exitCode=0 Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.895377 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" event={"ID":"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6","Type":"ContainerDied","Data":"d9ac72dedd323fc7b4ddd58de2c624c8e507d99f1831a9d86afd30cb4339c684"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.895839 4929 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-d4gn6 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" start-of-body= Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.895861 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6" podUID="70a7e3ab-45ef-4c7a-bd14-ac4d75df2020" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" Nov 22 07:13:29 crc kubenswrapper[4929]: W1122 07:13:29.898925 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5b957d93_142e_42ea_b527_ab92b32b2f8c.slice/crio-215821e87c29324b71f62140130002bc5be95b22fa87719fcd5888079499af1b WatchSource:0}: Error finding container 215821e87c29324b71f62140130002bc5be95b22fa87719fcd5888079499af1b: Status 404 returned error can't find the container with id 215821e87c29324b71f62140130002bc5be95b22fa87719fcd5888079499af1b Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.899769 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-vp5z4" event={"ID":"5d850ad3-bdf5-479f-9ca7-da300763391c","Type":"ContainerStarted","Data":"ac1b3939e91162c6df15d7d4ce30995f21950359a41ba3baa02d54fccf8109b3"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.902018 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-4xxhl" event={"ID":"9ced1bca-93c6-442e-b0b3-aea22cfde35d","Type":"ContainerStarted","Data":"e6f03e5a04828cb2c72d636fe6c4b4352a5e33bdbe3f92677eb21ed30c4d3e86"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.902945 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gwj27" 
event={"ID":"8336bed9-3f96-44a6-89a0-d6440caa4eee","Type":"ContainerStarted","Data":"68ceb3a93373db41921a42e7823a5656c2ddac2cb43b69d35e112d5148c008dd"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.904013 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-m7pl4" event={"ID":"c4607e1c-c400-490e-96aa-3f995008f6d8","Type":"ContainerStarted","Data":"a8ea8cf04e4aec3c6dbda1d6c778b0d78300b59fb27d69f4750c447164a5db19"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.905465 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wbnx6" event={"ID":"79a4c973-d95f-4898-81b8-00b97f3c2aa4","Type":"ContainerStarted","Data":"f774a1f2d7bbc16dde0fc9fc41b1e1d73d137cf59d41ff1b95d66b1dcb9d5b3a"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.906760 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-htp4z" event={"ID":"c3f44df6-8db0-4f77-ae46-f361e5d8948f","Type":"ContainerStarted","Data":"ecc1d3739a8422d5e0317b4787c2ab87b749182b2f281c129336d692cab0a96b"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.910725 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm" event={"ID":"a95c39bb-f2ae-4a5c-897f-1ac3a476c436","Type":"ContainerStarted","Data":"2aed47b0acb0806c373ea09651fd427ce95dcb8f61e160bfd08ec540c4ffd49a"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.911142 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k7jgs"] Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.913793 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-xglp4" event={"ID":"6650d266-80d2-46b5-9da3-f42cee1cc658","Type":"ContainerStarted","Data":"77abd4b0b3bafabbefdc1d5a01f8a54f202ba6cc0f8f219f1188fef47d67c269"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.914195 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9wd5b"] Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.915736 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5" event={"ID":"f2646e8d-e1a9-40ef-bfe3-51135e6d767e","Type":"ContainerStarted","Data":"52af84c65b43529ed3faac934721122e5efd2dc47675a0e3058c8361a9ed95ae"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.917580 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-tqwhs" event={"ID":"de2ba535-d661-453d-b4cd-19c6e7628b0c","Type":"ContainerStarted","Data":"1903a286d770690a0bbdbbd0c4400b018a6a501df140bb430e9e265621608bbe"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.923660 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-hr5lv"] Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.926318 4929 generic.go:334] "Generic (PLEG): container finished" podID="a410f03e-218e-4646-9b41-17a32af9330d" containerID="174d59f491d2c736b2c55bcefa32f24f4c6e69705a6744685c1818ef2e27f900" exitCode=0 Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.926377 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" 
event={"ID":"a410f03e-218e-4646-9b41-17a32af9330d","Type":"ContainerDied","Data":"174d59f491d2c736b2c55bcefa32f24f4c6e69705a6744685c1818ef2e27f900"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.926689 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-rwvvh"] Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.940436 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:29 crc kubenswrapper[4929]: E1122 07:13:29.941937 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:30.441920667 +0000 UTC m=+147.551374680 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.943948 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" event={"ID":"5f87df65-8531-414a-83a3-b4fb6c5059f9","Type":"ContainerStarted","Data":"a3794f301ac62b91919efef5b3d0703292a6c0c039ebe1169191cc47c7ec79cf"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.944700 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.945633 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-b7hfk" event={"ID":"d8fa808f-cd52-4783-a63a-af98d9359ec2","Type":"ContainerStarted","Data":"82a72155a975cf04107b9274e5c9558333bd76e78b11c634a6b3dd255eeeedca"} Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.946440 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-b7hfk" Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.946876 4929 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-hxxfj container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.946906 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" podUID="a917968f-39da-4359-a8cc-3f3bf28e5ab6" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.949381 4929 patch_prober.go:28] interesting 
pod/console-operator-58897d9998-b7hfk container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.949412 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-b7hfk" podUID="d8fa808f-cd52-4783-a63a-af98d9359ec2" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.949548 4929 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-lcg6c container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.949576 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" podUID="5f87df65-8531-414a-83a3-b4fb6c5059f9" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Nov 22 07:13:29 crc kubenswrapper[4929]: W1122 07:13:29.970227 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod70cd497c_e503_4088_ba79_c3d684026d40.slice/crio-761aa3ee45aeca043b75750f693de0c43511875a845e81b36a4fb391d8a9210f WatchSource:0}: Error finding container 761aa3ee45aeca043b75750f693de0c43511875a845e81b36a4fb391d8a9210f: Status 404 returned error can't find the container with id 761aa3ee45aeca043b75750f693de0c43511875a845e81b36a4fb391d8a9210f Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.970326 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" podStartSLOduration=102.970286493 podStartE2EDuration="1m42.970286493s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:29.970050428 +0000 UTC m=+147.079504441" watchObservedRunningTime="2025-11-22 07:13:29.970286493 +0000 UTC m=+147.079740506" Nov 22 07:13:29 crc kubenswrapper[4929]: I1122 07:13:29.987338 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-67z2d"] Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.048808 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:30 crc kubenswrapper[4929]: E1122 07:13:30.052058 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-22 07:13:30.55203496 +0000 UTC m=+147.661488973 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.052969 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-n9lkq" podStartSLOduration=103.052955303 podStartE2EDuration="1m43.052955303s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:30.00144624 +0000 UTC m=+147.110900273" watchObservedRunningTime="2025-11-22 07:13:30.052955303 +0000 UTC m=+147.162409316" Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.086666 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-2jnpc"] Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.092492 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9"] Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.110296 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-ljvnn"] Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.165477 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:30 crc kubenswrapper[4929]: E1122 07:13:30.166771 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:30.666756268 +0000 UTC m=+147.776210281 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:30 crc kubenswrapper[4929]: W1122 07:13:30.172642 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod68abc8a7_8068_4f85_ac04_ecdcaff7bab4.slice/crio-c398a45dbdb41ebb5e32cb4b1c6186be0aeafd06c9ab8aa2886831190cdb9727 WatchSource:0}: Error finding container c398a45dbdb41ebb5e32cb4b1c6186be0aeafd06c9ab8aa2886831190cdb9727: Status 404 returned error can't find the container with id c398a45dbdb41ebb5e32cb4b1c6186be0aeafd06c9ab8aa2886831190cdb9727 Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.268553 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:30 crc kubenswrapper[4929]: E1122 07:13:30.268843 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:30.768829859 +0000 UTC m=+147.878283872 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.353615 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h8b2l" podStartSLOduration=103.35356974 podStartE2EDuration="1m43.35356974s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:30.352152965 +0000 UTC m=+147.461606988" watchObservedRunningTime="2025-11-22 07:13:30.35356974 +0000 UTC m=+147.463023743" Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.369742 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:30 crc kubenswrapper[4929]: E1122 07:13:30.370173 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-22 07:13:30.870139403 +0000 UTC m=+147.979593416 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.432873 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-tqwhs" podStartSLOduration=103.432857795 podStartE2EDuration="1m43.432857795s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:30.432536497 +0000 UTC m=+147.541990520" watchObservedRunningTime="2025-11-22 07:13:30.432857795 +0000 UTC m=+147.542311808" Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.471399 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:30 crc kubenswrapper[4929]: E1122 07:13:30.472038 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:30.972019481 +0000 UTC m=+148.081473494 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.474907 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-xfmc5" podStartSLOduration=103.474885192 podStartE2EDuration="1m43.474885192s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:30.47360377 +0000 UTC m=+147.583057803" watchObservedRunningTime="2025-11-22 07:13:30.474885192 +0000 UTC m=+147.584339215" Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.552495 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-b7hfk" podStartSLOduration=103.552474275 podStartE2EDuration="1m43.552474275s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:30.513565056 +0000 UTC m=+147.623019069" watchObservedRunningTime="2025-11-22 07:13:30.552474275 +0000 UTC m=+147.661928288" Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.575161 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:30 crc kubenswrapper[4929]: E1122 07:13:30.575565 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:31.07554777 +0000 UTC m=+148.185001783 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.600989 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" podStartSLOduration=103.600964363 podStartE2EDuration="1m43.600964363s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:30.593791704 +0000 UTC m=+147.703246317" watchObservedRunningTime="2025-11-22 07:13:30.600964363 +0000 UTC m=+147.710418376" Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.602586 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6" podStartSLOduration=103.602579243 podStartE2EDuration="1m43.602579243s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:30.55388206 +0000 UTC m=+147.663336083" watchObservedRunningTime="2025-11-22 07:13:30.602579243 +0000 UTC m=+147.712033256" Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.681261 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:30 crc kubenswrapper[4929]: E1122 07:13:30.682289 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:31.182248248 +0000 UTC m=+148.291702261 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.682943 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:30 crc kubenswrapper[4929]: E1122 07:13:30.683553 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:31.183517319 +0000 UTC m=+148.292971332 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.783984 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:30 crc kubenswrapper[4929]: E1122 07:13:30.784165 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:31.284140736 +0000 UTC m=+148.393594769 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.784613 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:30 crc kubenswrapper[4929]: E1122 07:13:30.785254 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:31.285239373 +0000 UTC m=+148.394693386 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.886278 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:30 crc kubenswrapper[4929]: E1122 07:13:30.886668 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:31.386632639 +0000 UTC m=+148.496086652 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.950863 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-4xxhl" event={"ID":"9ced1bca-93c6-442e-b0b3-aea22cfde35d","Type":"ContainerStarted","Data":"dac18c9c81aa2ba4de9038f584b7e5448357b0d5f8db542fe2e6bb1d113ee56a"} Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.953181 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" event={"ID":"5f87df65-8531-414a-83a3-b4fb6c5059f9","Type":"ContainerStarted","Data":"76a72d6593ebf390e6a2bd088578b3237755a678f3a60283624045d9e199c4e4"} Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.959047 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-vnrtj" event={"ID":"6e050819-4ad1-4091-bc11-2c808fb19f36","Type":"ContainerStarted","Data":"f891b27868fcfa7b0fbeab13be9e23d4cd28ffbe0122a3c5e559c81a53fb6fa9"} Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.959096 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-vnrtj" event={"ID":"6e050819-4ad1-4091-bc11-2c808fb19f36","Type":"ContainerStarted","Data":"e0a84e2665b80417f45488cafcf69cc95cc6e600c48aecfffe3afe8151d432dd"} Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.960959 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm" event={"ID":"a95c39bb-f2ae-4a5c-897f-1ac3a476c436","Type":"ContainerStarted","Data":"1070fcba26aa714e79a73ce3e21951a05405b60a3a175241606025bd3a9317f8"} Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.962217 4929 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-lcg6c container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.962278 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" podUID="5f87df65-8531-414a-83a3-b4fb6c5059f9" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.967096 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k72qt" event={"ID":"d5baff14-8d60-47bf-a0a7-42a485b60a96","Type":"ContainerStarted","Data":"5dce2964630d71209b61b6a5e2227d9adbb917e9dba0eac2f6f4a01206a3a38c"} Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.973357 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-f2px2" 
event={"ID":"650a26c5-072d-4d2e-bc26-181e8a4658dc","Type":"ContainerStarted","Data":"3f9b8898200280b5830045a9196ec09de93f0a89e00bb2dfba23f9e8b2d5f2dc"} Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.975964 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm" podStartSLOduration=103.975948504 podStartE2EDuration="1m43.975948504s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:30.97460786 +0000 UTC m=+148.084061883" watchObservedRunningTime="2025-11-22 07:13:30.975948504 +0000 UTC m=+148.085402517" Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.977609 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7" event={"ID":"8891c9c1-14b1-4057-87d8-fed0883277c5","Type":"ContainerStarted","Data":"3b167030059a602d796d489457e1e3c76b55d429cebe9ec931bfa5e9c6dace1f"} Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.979268 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lsnjk" event={"ID":"b576b64c-589c-409a-b3b8-482e9066c45a","Type":"ContainerStarted","Data":"66c97567db14749e911a0c4dcb09e0ac903f9236131e53dacbcf5db76ec71ce3"} Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.979316 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lsnjk" event={"ID":"b576b64c-589c-409a-b3b8-482e9066c45a","Type":"ContainerStarted","Data":"41e82c1ad22f578fbe5fea5bc85f88bc5ff66698285680ccb1fddb8dc444ff2f"} Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.980440 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k7jgs" event={"ID":"964d3766-8968-4ce0-b68c-bf839937e0d3","Type":"ContainerStarted","Data":"27bd93b4fded2c165d462eb932b72eceb747988cbf647ff9e5b0ea812bd0883f"} Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.989003 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:30 crc kubenswrapper[4929]: E1122 07:13:30.991180 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:31.491166613 +0000 UTC m=+148.600620626 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.991458 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-2jnpc" event={"ID":"ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb","Type":"ContainerStarted","Data":"7f3cf96b0250e51aa94a1a08756d72c02517c45dffd4239cc644a240e901ff07"} Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.996520 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-2zrch" event={"ID":"54a8c9df-7649-46eb-afdd-054fa0c1f5eb","Type":"ContainerStarted","Data":"798b17d9ed688325891fe3e1baf250da999bbc6b1a5376a3b8eb5e5c1f165984"} Nov 22 07:13:30 crc kubenswrapper[4929]: I1122 07:13:30.998161 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-f2px2" podStartSLOduration=103.998144427 podStartE2EDuration="1m43.998144427s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:30.994033094 +0000 UTC m=+148.103487107" watchObservedRunningTime="2025-11-22 07:13:30.998144427 +0000 UTC m=+148.107598440" Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.006983 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-vp5z4" event={"ID":"5d850ad3-bdf5-479f-9ca7-da300763391c","Type":"ContainerStarted","Data":"9219e2f0e04a19a350fafbfbd63730adcec993577fe041f31f1a37156a455c28"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.009934 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-xglp4" event={"ID":"6650d266-80d2-46b5-9da3-f42cee1cc658","Type":"ContainerStarted","Data":"ea256e954620995fef5de0a4aaa7d2fa287012dedae9ad3822994847245c0fbc"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.010941 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-m2bg6" event={"ID":"c74d8b5a-3054-4132-b375-8956e231f4ac","Type":"ContainerStarted","Data":"b9d58f392e3618a363042b5a29c1e12d7c3fd7a116451813cb5282bcf035f3c9"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.015438 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" event={"ID":"d83571fe-a17e-49ad-a121-b58f90d914d7","Type":"ContainerStarted","Data":"c38a50febd35d1cb5007faaabb1e67834f785cc7926eb523d112dd2525e0ea75"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.022967 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd" event={"ID":"b6d0965a-9351-48ba-82f0-0c1623041c7e","Type":"ContainerStarted","Data":"c3b6a36850be98f75b52f4cb9fc92e95432687f9d4631ac63b44d46f44a38274"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.024990 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-m2bg6" podStartSLOduration=104.024979315 podStartE2EDuration="1m44.024979315s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:31.023624811 +0000 UTC m=+148.133078824" watchObservedRunningTime="2025-11-22 07:13:31.024979315 +0000 UTC m=+148.134433328" Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.031244 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gwj27" event={"ID":"8336bed9-3f96-44a6-89a0-d6440caa4eee","Type":"ContainerStarted","Data":"08fcd2aa6007df29396e2810f7857aecd9a8d37b6efd052c9ba58e300ccab8f6"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.034064 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" event={"ID":"7ba449ab-e58d-4b27-9f93-8dd3d784a077","Type":"ContainerStarted","Data":"518c8535ab2cd4ab09101b99516edbcd638ae7b2016af316a3fccfcdc9b4ae24"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.035394 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-m7pl4" event={"ID":"c4607e1c-c400-490e-96aa-3f995008f6d8","Type":"ContainerStarted","Data":"baf9db94e46ebbc4772eb3e7d2a1155cf2bba4e690462e7538699804053a2488"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.038233 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw" event={"ID":"5b957d93-142e-42ea-b527-ab92b32b2f8c","Type":"ContainerStarted","Data":"02657711c6a8a607a5875f79304930674076e32ba41f7425d12ac7371aa1e41b"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.038264 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw" event={"ID":"5b957d93-142e-42ea-b527-ab92b32b2f8c","Type":"ContainerStarted","Data":"215821e87c29324b71f62140130002bc5be95b22fa87719fcd5888079499af1b"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.040252 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wbnx6" event={"ID":"79a4c973-d95f-4898-81b8-00b97f3c2aa4","Type":"ContainerStarted","Data":"07aacbf33a9c6063cfb84d427ed9bc30d7fc3311a6e4a0219c9c56e02fdd656b"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.041838 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c" event={"ID":"75d7a4b2-62f1-4a8b-af0f-3f3efc291883","Type":"ContainerStarted","Data":"b8193c5fdf259bac1b3242a42b7346d07760ac154a554ed5451ffdc1577902a0"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.044534 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-htp4z" event={"ID":"c3f44df6-8db0-4f77-ae46-f361e5d8948f","Type":"ContainerStarted","Data":"e8ec3931cc6795cdb4ba38dcce79081134be81c4190ac7032121d9d73ce79970"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.045417 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-ljvnn" 
event={"ID":"a77d942c-13ee-4835-b773-7dbe3dc4ef76","Type":"ContainerStarted","Data":"1878b1b25a92706e1e96b92254d92f58a8ba1019d931dc9772701dc9fe77f968"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.046539 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-67z2d" event={"ID":"a3233e89-e2e8-4172-ab0c-ff6f190f9fe7","Type":"ContainerStarted","Data":"c535ad59837cf84ed210f2969ce351784f4e9af2f803fda5d83eaa1a570812a5"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.051849 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph" event={"ID":"47615a4c-c032-42d8-935a-8a2fbb40c0af","Type":"ContainerStarted","Data":"ac73eef5cc9f6a675108d2e4ce9cd26d39acafb28961e4402d72439b4f912b9b"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.052111 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph" Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.057504 4929 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-nbrph container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused" start-of-body= Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.057580 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph" podUID="47615a4c-c032-42d8-935a-8a2fbb40c0af" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused" Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.059406 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9wd5b" event={"ID":"70cd497c-e503-4088-ba79-c3d684026d40","Type":"ContainerStarted","Data":"761aa3ee45aeca043b75750f693de0c43511875a845e81b36a4fb391d8a9210f"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.063031 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-rwvvh" event={"ID":"524eccdd-f85b-47c0-8c0a-d251ff814500","Type":"ContainerStarted","Data":"619aa062e9602c711d3eaf0e4cbe18197a9d1b48b5ad5543231fc3bb1cb2038c"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.065246 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4lzcw" event={"ID":"3db640c6-77ea-4bb7-a250-2b82f20eb4c6","Type":"ContainerStarted","Data":"4ac481fef33f4949d43599b3f00b20822e276b9e957c2dcf691fdf655aed7b8d"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.067052 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9" event={"ID":"68abc8a7-8068-4f85-ac04-ecdcaff7bab4","Type":"ContainerStarted","Data":"c398a45dbdb41ebb5e32cb4b1c6186be0aeafd06c9ab8aa2886831190cdb9727"} Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.070086 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw" event={"ID":"d6f53ed8-9185-4537-a9d0-ef9176e61bd4","Type":"ContainerStarted","Data":"515788ae6ec9f8b4f46085bcfe41f5f3a1da3adff0bac2c67340ff4cb7ed66a8"} Nov 22 07:13:31 crc 
kubenswrapper[4929]: I1122 07:13:31.071192 4929 patch_prober.go:28] interesting pod/console-operator-58897d9998-b7hfk container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.071251 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-b7hfk" podUID="d8fa808f-cd52-4783-a63a-af98d9359ec2" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.072323 4929 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-d4gn6 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" start-of-body= Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.072351 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6" podUID="70a7e3ab-45ef-4c7a-bd14-ac4d75df2020" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.33:5443/healthz\": dial tcp 10.217.0.33:5443: connect: connection refused" Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.078803 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph" podStartSLOduration=104.078775715 podStartE2EDuration="1m44.078775715s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:31.078367685 +0000 UTC m=+148.187821718" watchObservedRunningTime="2025-11-22 07:13:31.078775715 +0000 UTC m=+148.188229728" Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.091090 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:31 crc kubenswrapper[4929]: E1122 07:13:31.092753 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:31.592731033 +0000 UTC m=+148.702185046 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.194034 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:31 crc kubenswrapper[4929]: E1122 07:13:31.194452 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:31.694434096 +0000 UTC m=+148.803888109 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.294955 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:31 crc kubenswrapper[4929]: E1122 07:13:31.295265 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:31.795236217 +0000 UTC m=+148.904690240 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.396156 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:31 crc kubenswrapper[4929]: E1122 07:13:31.397248 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:31.897226448 +0000 UTC m=+149.006680461 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.497722 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:31 crc kubenswrapper[4929]: E1122 07:13:31.498029 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:31.998014879 +0000 UTC m=+149.107468892 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.599245 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:31 crc kubenswrapper[4929]: E1122 07:13:31.599675 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:32.099659021 +0000 UTC m=+149.209113034 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.701061 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:31 crc kubenswrapper[4929]: E1122 07:13:31.701565 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:32.201542559 +0000 UTC m=+149.310996572 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.804704 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:31 crc kubenswrapper[4929]: E1122 07:13:31.805027 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:32.305012276 +0000 UTC m=+149.414466289 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:31 crc kubenswrapper[4929]: I1122 07:13:31.906849 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:31 crc kubenswrapper[4929]: E1122 07:13:31.907616 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:32.407596321 +0000 UTC m=+149.517050334 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.008460 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:32 crc kubenswrapper[4929]: E1122 07:13:32.008951 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:32.508931486 +0000 UTC m=+149.618385499 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.080328 4929 generic.go:334] "Generic (PLEG): container finished" podID="75d7a4b2-62f1-4a8b-af0f-3f3efc291883" containerID="b8193c5fdf259bac1b3242a42b7346d07760ac154a554ed5451ffdc1577902a0" exitCode=0
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.080394 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c" event={"ID":"75d7a4b2-62f1-4a8b-af0f-3f3efc291883","Type":"ContainerDied","Data":"b8193c5fdf259bac1b3242a42b7346d07760ac154a554ed5451ffdc1577902a0"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.100664 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gwj27" event={"ID":"8336bed9-3f96-44a6-89a0-d6440caa4eee","Type":"ContainerStarted","Data":"ebb850be0d3d00e6e2b15c7b347563b5663f154954c0584765d1ea600e9dc164"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.105436 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-rwvvh" event={"ID":"524eccdd-f85b-47c0-8c0a-d251ff814500","Type":"ContainerStarted","Data":"d1835ec6d9cc9be24d5b918e7d7156396fc5d765fcd921ca7a1e20b5e0f39d50"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.110949 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:32 crc kubenswrapper[4929]: E1122 07:13:32.111438 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:32.611412519 +0000 UTC m=+149.720866542 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.122552 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-67z2d" event={"ID":"a3233e89-e2e8-4172-ab0c-ff6f190f9fe7","Type":"ContainerStarted","Data":"f22011243d0b76611683e1174c929bba5e5b283f0826ffe1fc4e2f80aa4d326b"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.122601 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-67z2d" event={"ID":"a3233e89-e2e8-4172-ab0c-ff6f190f9fe7","Type":"ContainerStarted","Data":"2eba69e8a608bb1bec18f0b8a36fec0a86bc4abf4c3f23a408ba2d1f937ca2c9"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.146385 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-gwj27" podStartSLOduration=105.146363039 podStartE2EDuration="1m45.146363039s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.123414697 +0000 UTC m=+149.232868710" watchObservedRunningTime="2025-11-22 07:13:32.146363039 +0000 UTC m=+149.255817062"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.153849 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9wd5b" event={"ID":"70cd497c-e503-4088-ba79-c3d684026d40","Type":"ContainerStarted","Data":"f4cacab4a1051aa710aa50c8037a4262a9fa209c8fb8054079c285af6fd552a6"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.205081 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k7jgs" event={"ID":"964d3766-8968-4ce0-b68c-bf839937e0d3","Type":"ContainerStarted","Data":"ce1e64ca488bb38e43bfced12902c5503ce27853ab8d77940c31bfe585d70051"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.211257 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-67z2d" podStartSLOduration=105.211235725 podStartE2EDuration="1m45.211235725s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.153496177 +0000 UTC m=+149.262950220" watchObservedRunningTime="2025-11-22 07:13:32.211235725 +0000 UTC m=+149.320689728"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.212850 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:32 crc kubenswrapper[4929]: E1122 07:13:32.214867 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:32.714849245 +0000 UTC m=+149.824303328 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.215467 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd" event={"ID":"b6d0965a-9351-48ba-82f0-0c1623041c7e","Type":"ContainerStarted","Data":"49214cc376d2a47a63d67b042015ca7b62068faff9193e5058adffc0e9100e54"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.230465 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-9wd5b" podStartSLOduration=105.230442804 podStartE2EDuration="1m45.230442804s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.209950263 +0000 UTC m=+149.319404286" watchObservedRunningTime="2025-11-22 07:13:32.230442804 +0000 UTC m=+149.339896817"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.230705 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k7jgs" podStartSLOduration=105.23070201 podStartE2EDuration="1m45.23070201s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.229779527 +0000 UTC m=+149.339233540" watchObservedRunningTime="2025-11-22 07:13:32.23070201 +0000 UTC m=+149.340156023"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.230776 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4lzcw" event={"ID":"3db640c6-77ea-4bb7-a250-2b82f20eb4c6","Type":"ContainerStarted","Data":"46a78fafe6c2703ddf21eb42054d18c75315dbc514cc1e3baf65cc7585ad21c4"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.256376 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" event={"ID":"3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6","Type":"ContainerStarted","Data":"44050bd50dc99ebfc4e4111029d6dfd3f99db40763cd045f64a4877c1f33123f"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.271125 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k72qt" event={"ID":"d5baff14-8d60-47bf-a0a7-42a485b60a96","Type":"ContainerStarted","Data":"1c2e9d0c80fb45b99137346e53f2fb116f6178bbcb54b7c80863fc6b51c44b64"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.298947 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9" event={"ID":"68abc8a7-8068-4f85-ac04-ecdcaff7bab4","Type":"ContainerStarted","Data":"ea117feb89d35e3a1af6f2d04c7819dd8d0f785756ef21e8ea984417c6df5b9d"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.299016 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9" event={"ID":"68abc8a7-8068-4f85-ac04-ecdcaff7bab4","Type":"ContainerStarted","Data":"b49e5d8c1d82f1cb45d0d0959cd5d36bbe16df9e10574b6c954e67cb5cc4c0da"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.306822 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" podStartSLOduration=105.306803916 podStartE2EDuration="1m45.306803916s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.305909593 +0000 UTC m=+149.415363606" watchObservedRunningTime="2025-11-22 07:13:32.306803916 +0000 UTC m=+149.416257929"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.307278 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-25lrd" podStartSLOduration=105.307272867 podStartE2EDuration="1m45.307272867s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.263911197 +0000 UTC m=+149.373365210" watchObservedRunningTime="2025-11-22 07:13:32.307272867 +0000 UTC m=+149.416726880"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.313621 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:32 crc kubenswrapper[4929]: E1122 07:13:32.315023 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:32.814955219 +0000 UTC m=+149.924409272 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.317390 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-4xxhl" event={"ID":"9ced1bca-93c6-442e-b0b3-aea22cfde35d","Type":"ContainerStarted","Data":"e723d57f4d41f061c9e6817d77a62feb41639ba15b15036df06742d493af3033"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.320913 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-2jnpc" event={"ID":"ed150f1a-6e10-4c1a-90e8-4eb6fdcac0cb","Type":"ContainerStarted","Data":"1bbdd570deb8fcb5f677ec2cc7918502bbd37a51482fa6e04f6aa873afd8e231"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.331808 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7" event={"ID":"8891c9c1-14b1-4057-87d8-fed0883277c5","Type":"ContainerStarted","Data":"a42a8e7951f91abe3b73934e60f03c2c11ab109727b27dcf30cb335a0839fe88"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.333039 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.335092 4929 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-r9sq7 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" start-of-body=
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.335138 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7" podUID="8891c9c1-14b1-4057-87d8-fed0883277c5" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.349618 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" event={"ID":"a410f03e-218e-4646-9b41-17a32af9330d","Type":"ContainerStarted","Data":"0d346926ceea61e613c7a1c8894159099c0ea4a79f2f6276d76410b8b1e83911"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.351938 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4lzcw" podStartSLOduration=105.35191923 podStartE2EDuration="1m45.35191923s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.331415339 +0000 UTC m=+149.440869352" watchObservedRunningTime="2025-11-22 07:13:32.35191923 +0000 UTC m=+149.461373233"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.377403 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-htp4z" event={"ID":"c3f44df6-8db0-4f77-ae46-f361e5d8948f","Type":"ContainerStarted","Data":"3331cbe84e0bb4fd9fd69351113360691eb9166b1592ec3c434a98038abe254c"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.384441 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-2jnpc" podStartSLOduration=105.384421789 podStartE2EDuration="1m45.384421789s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.350548735 +0000 UTC m=+149.460002748" watchObservedRunningTime="2025-11-22 07:13:32.384421789 +0000 UTC m=+149.493875802"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.384725 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-jdvf9" podStartSLOduration=105.384719377 podStartE2EDuration="1m45.384719377s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.381722882 +0000 UTC m=+149.491176895" watchObservedRunningTime="2025-11-22 07:13:32.384719377 +0000 UTC m=+149.494173380"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.393116 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-ljvnn" event={"ID":"a77d942c-13ee-4835-b773-7dbe3dc4ef76","Type":"ContainerStarted","Data":"87f8d735ec8ba972ce27cc838dea7f09f8f19995c932efbf74115bf009e57ff5"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.402922 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-4xxhl" podStartSLOduration=105.40290443 podStartE2EDuration="1m45.40290443s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.402189842 +0000 UTC m=+149.511643895" watchObservedRunningTime="2025-11-22 07:13:32.40290443 +0000 UTC m=+149.512358443"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.414588 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-m7pl4" event={"ID":"c4607e1c-c400-490e-96aa-3f995008f6d8","Type":"ContainerStarted","Data":"37b99424086b91bdfa66faeeb9dd0f70a3db6f2194b13fd3c6a971294fb46ecc"}
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.415044 4929 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-nbrph container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused" start-of-body=
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.415085 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph" podUID="47615a4c-c032-42d8-935a-8a2fbb40c0af" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.415203 4929 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-lcg6c container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body=
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.415288 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" podUID="5f87df65-8531-414a-83a3-b4fb6c5059f9" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.416968 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:32 crc kubenswrapper[4929]: E1122 07:13:32.417505 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:32.917489963 +0000 UTC m=+150.026943976 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.436743 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k72qt" podStartSLOduration=105.436725572 podStartE2EDuration="1m45.436725572s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.435637845 +0000 UTC m=+149.545091868" watchObservedRunningTime="2025-11-22 07:13:32.436725572 +0000 UTC m=+149.546179585"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.475397 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wbnx6" podStartSLOduration=105.475378635 podStartE2EDuration="1m45.475378635s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.475273762 +0000 UTC m=+149.584727775" watchObservedRunningTime="2025-11-22 07:13:32.475378635 +0000 UTC m=+149.584832648"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.475670 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw" podStartSLOduration=105.475665122 podStartE2EDuration="1m45.475665122s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.456421073 +0000 UTC m=+149.565875086" watchObservedRunningTime="2025-11-22 07:13:32.475665122 +0000 UTC m=+149.585119135"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.506643 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7" podStartSLOduration=105.506623073 podStartE2EDuration="1m45.506623073s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.505119186 +0000 UTC m=+149.614573209" watchObservedRunningTime="2025-11-22 07:13:32.506623073 +0000 UTC m=+149.616077086"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.518079 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:32 crc kubenswrapper[4929]: E1122 07:13:32.519396 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:33.019362701 +0000 UTC m=+150.128816724 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.524815 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-xglp4" podStartSLOduration=7.524796126 podStartE2EDuration="7.524796126s" podCreationTimestamp="2025-11-22 07:13:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.522665033 +0000 UTC m=+149.632119046" watchObservedRunningTime="2025-11-22 07:13:32.524796126 +0000 UTC m=+149.634250149"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.547287 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-ljvnn" podStartSLOduration=7.547200124 podStartE2EDuration="7.547200124s" podCreationTimestamp="2025-11-22 07:13:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.545754228 +0000 UTC m=+149.655208241" watchObservedRunningTime="2025-11-22 07:13:32.547200124 +0000 UTC m=+149.656654137"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.568062 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-m7pl4" podStartSLOduration=105.568040563 podStartE2EDuration="1m45.568040563s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.56670589 +0000 UTC m=+149.676159913" watchObservedRunningTime="2025-11-22 07:13:32.568040563 +0000 UTC m=+149.677494576"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.620139 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:32 crc kubenswrapper[4929]: E1122 07:13:32.620651 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:33.120630593 +0000 UTC m=+150.230084656 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.643810 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" podStartSLOduration=105.64379061 podStartE2EDuration="1m45.64379061s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.642641442 +0000 UTC m=+149.752095455" watchObservedRunningTime="2025-11-22 07:13:32.64379061 +0000 UTC m=+149.753244623"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.644550 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-htp4z" podStartSLOduration=105.644543219 podStartE2EDuration="1m45.644543219s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.593365364 +0000 UTC m=+149.702819377" watchObservedRunningTime="2025-11-22 07:13:32.644543219 +0000 UTC m=+149.753997232"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.647084 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-vp5z4"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.647311 4929 patch_prober.go:28] interesting pod/router-default-5444994796-vp5z4 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body=
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.647355 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vp5z4" podUID="5d850ad3-bdf5-479f-9ca7-da300763391c" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.678717 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-vnrtj" podStartSLOduration=105.67869805 podStartE2EDuration="1m45.67869805s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.676388372 +0000 UTC m=+149.785842395" watchObservedRunningTime="2025-11-22 07:13:32.67869805 +0000 UTC m=+149.788152063"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.701314 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-nnnk9"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.701707 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-nnnk9"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.705852 4929 patch_prober.go:28] interesting pod/apiserver-76f77b778f-nnnk9 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.7:8443/livez\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body=
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.705905 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" podUID="a410f03e-218e-4646-9b41-17a32af9330d" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.7:8443/livez\": dial tcp 10.217.0.7:8443: connect: connection refused"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.721430 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:32 crc kubenswrapper[4929]: E1122 07:13:32.721715 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:33.221700891 +0000 UTC m=+150.331154904 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.736393 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" podStartSLOduration=105.736371916 podStartE2EDuration="1m45.736371916s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.708586504 +0000 UTC m=+149.818040517" watchObservedRunningTime="2025-11-22 07:13:32.736371916 +0000 UTC m=+149.845825929"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.738270 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-vp5z4" podStartSLOduration=105.738262424 podStartE2EDuration="1m45.738262424s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.735748631 +0000 UTC m=+149.845202664" watchObservedRunningTime="2025-11-22 07:13:32.738262424 +0000 UTC m=+149.847716437"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.747586 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.749322 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.751363 4929 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-wmd4n container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.8:8443/livez\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body=
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.751424 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" podUID="3b3f6540-7bc1-45b3-be62-51a9cf6ddeb6" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.8:8443/livez\": dial tcp 10.217.0.8:8443: connect: connection refused"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.807343 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5tlxw" podStartSLOduration=105.807322724 podStartE2EDuration="1m45.807322724s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.78268628 +0000 UTC m=+149.892140293" watchObservedRunningTime="2025-11-22 07:13:32.807322724 +0000 UTC m=+149.916776737"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.822706 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:32 crc kubenswrapper[4929]: E1122 07:13:32.823175 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:33.323154448 +0000 UTC m=+150.432608521 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.833377 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-lsnjk" podStartSLOduration=105.833355892 podStartE2EDuration="1m45.833355892s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.811463427 +0000 UTC m=+149.920917460" watchObservedRunningTime="2025-11-22 07:13:32.833355892 +0000 UTC m=+149.942809905"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.834425 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-2zrch" podStartSLOduration=105.834420449 podStartE2EDuration="1m45.834420449s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:32.832689956 +0000 UTC m=+149.942143979" watchObservedRunningTime="2025-11-22 07:13:32.834420449 +0000 UTC m=+149.943874462"
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.923764 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:32 crc kubenswrapper[4929]: E1122 07:13:32.923924 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:33.423900698 +0000 UTC m=+150.533354711 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:32 crc kubenswrapper[4929]: I1122 07:13:32.923979 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:32 crc kubenswrapper[4929]: E1122 07:13:32.924323 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:33.424315998 +0000 UTC m=+150.533770011 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.024806 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.024950 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:33.524920994 +0000 UTC m=+150.634375007 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.025421 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.025711 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:33.525701604 +0000 UTC m=+150.635155697 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.126035 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.126233 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:33.626196567 +0000 UTC m=+150.735650580 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.126354 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.126661 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:33.626654138 +0000 UTC m=+150.736108151 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.227591 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.227751 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:33.727729186 +0000 UTC m=+150.837183199 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.227959 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.228235 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:33.728226489 +0000 UTC m=+150.837680502 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.329235 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.329447 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:33.829413729 +0000 UTC m=+150.938867742 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.329529 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.329931 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:33.829920842 +0000 UTC m=+150.939374925 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.414800 4929 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-d4gn6 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.33:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.414860 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6" podUID="70a7e3ab-45ef-4c7a-bd14-ac4d75df2020" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.33:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.418871 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-rwvvh" event={"ID":"524eccdd-f85b-47c0-8c0a-d251ff814500","Type":"ContainerStarted","Data":"6910c95e045d51c8a8cafd364f4ec575cb309b548c05450e347018a8453145f2"}
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.419023 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-rwvvh"
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.420479 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c" event={"ID":"75d7a4b2-62f1-4a8b-af0f-3f3efc291883","Type":"ContainerStarted","Data":"7d58545df81fa67a3aab75652012ba135beff36e08e5f34e091775fd8a897d4c"}
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.420596 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c"
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.422633 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" event={"ID":"a410f03e-218e-4646-9b41-17a32af9330d","Type":"ContainerStarted","Data":"842c035f94217bd0cacaf018b4b068329fa0461013ae59ebdd83aab2124a71db"}
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.424520 4929 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-r9sq7 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused" start-of-body=
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.424534 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" event={"ID":"d83571fe-a17e-49ad-a121-b58f90d914d7","Type":"ContainerStarted","Data":"828ad08fe83293eede4fe8bd3afecc4c46e4966f2c5cfb0196f2b597e7448dcd"}
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.424561 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7" podUID="8891c9c1-14b1-4057-87d8-fed0883277c5" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.27:8443/healthz\": dial tcp 10.217.0.27:8443: connect: connection refused"
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.425930 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-m7pl4"
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.430757 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.430910 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:33.930884057 +0000 UTC m=+151.040338070 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.431273 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.432298 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:33.932279912 +0000 UTC m=+151.041733995 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.468013 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-rwvvh" podStartSLOduration=8.467991041 podStartE2EDuration="8.467991041s" podCreationTimestamp="2025-11-22 07:13:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:33.467455858 +0000 UTC m=+150.576909871" watchObservedRunningTime="2025-11-22 07:13:33.467991041 +0000 UTC m=+150.577445054"
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.497442 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c" podStartSLOduration=106.497419504 podStartE2EDuration="1m46.497419504s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:33.495145978 +0000 UTC m=+150.604599991" watchObservedRunningTime="2025-11-22 07:13:33.497419504 +0000 UTC m=+150.606873517"
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.534158 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.534469 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:34.034436456 +0000 UTC m=+151.143890489 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.535011 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.536507 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:34.036492518 +0000 UTC m=+151.145946591 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.635808 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.636009 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:34.135979326 +0000 UTC m=+151.245433339 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.636101 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.636470 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:34.136459418 +0000 UTC m=+151.245913501 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.648762 4929 patch_prober.go:28] interesting pod/router-default-5444994796-vp5z4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 07:13:33 crc kubenswrapper[4929]: [-]has-synced failed: reason withheld
Nov 22 07:13:33 crc kubenswrapper[4929]: [+]process-running ok
Nov 22 07:13:33 crc kubenswrapper[4929]: healthz check failed
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.648832 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vp5z4" podUID="5d850ad3-bdf5-479f-9ca7-da300763391c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.737138 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.737305 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:34.23728501 +0000 UTC m=+151.346739023 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.737399 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.737735 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:34.23772459 +0000 UTC m=+151.347178603 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.838461 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.838682 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:34.338651804 +0000 UTC m=+151.448105817 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.838875 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.839241 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:34.339222928 +0000 UTC m=+151.448676951 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.939924 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.940155 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:34.440107521 +0000 UTC m=+151.549561534 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:33 crc kubenswrapper[4929]: I1122 07:13:33.940372 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:33 crc kubenswrapper[4929]: E1122 07:13:33.940746 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:34.440731436 +0000 UTC m=+151.550185449 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.041285 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:34 crc kubenswrapper[4929]: E1122 07:13:34.041485 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:34.541454375 +0000 UTC m=+151.650908388 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.041666 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:34 crc kubenswrapper[4929]: E1122 07:13:34.042172 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:34.542156793 +0000 UTC m=+151.651610876 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.143321 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:34 crc kubenswrapper[4929]: E1122 07:13:34.143501 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:34.643475767 +0000 UTC m=+151.752929770 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.143573 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:34 crc kubenswrapper[4929]: E1122 07:13:34.143946 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:34.643938248 +0000 UTC m=+151.753392261 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.244527 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:34 crc kubenswrapper[4929]: E1122 07:13:34.244624 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:34.744608646 +0000 UTC m=+151.854062659 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.244899 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:34 crc kubenswrapper[4929]: E1122 07:13:34.245168 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:34.74516157 +0000 UTC m=+151.854615583 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.345990 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:34 crc kubenswrapper[4929]: E1122 07:13:34.346381 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:34.846367321 +0000 UTC m=+151.955821334 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.449335 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:34 crc kubenswrapper[4929]: E1122 07:13:34.450278 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:34.950258719 +0000 UTC m=+152.059712812 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.488555 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-r9sq7" Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.558955 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:34 crc kubenswrapper[4929]: E1122 07:13:34.559149 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:35.059123911 +0000 UTC m=+152.168577924 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.559355 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:34 crc kubenswrapper[4929]: E1122 07:13:34.559813 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:35.059703015 +0000 UTC m=+152.169157038 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.647434 4929 patch_prober.go:28] interesting pod/router-default-5444994796-vp5z4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 07:13:34 crc kubenswrapper[4929]: [-]has-synced failed: reason withheld Nov 22 07:13:34 crc kubenswrapper[4929]: [+]process-running ok Nov 22 07:13:34 crc kubenswrapper[4929]: healthz check failed Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.647498 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vp5z4" podUID="5d850ad3-bdf5-479f-9ca7-da300763391c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.660002 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:34 crc kubenswrapper[4929]: E1122 07:13:34.660155 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:35.160138407 +0000 UTC m=+152.269592420 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.660415 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:34 crc kubenswrapper[4929]: E1122 07:13:34.660736 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:35.160724812 +0000 UTC m=+152.270178825 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.761432 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:34 crc kubenswrapper[4929]: E1122 07:13:34.761635 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:35.261605255 +0000 UTC m=+152.371059268 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.761753 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:34 crc kubenswrapper[4929]: E1122 07:13:34.762065 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:35.262053686 +0000 UTC m=+152.371507699 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.862407 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:34 crc kubenswrapper[4929]: E1122 07:13:34.862637 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:35.362606611 +0000 UTC m=+152.472060634 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.862808 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:34 crc kubenswrapper[4929]: E1122 07:13:34.863159 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:35.363141904 +0000 UTC m=+152.472595917 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:34 crc kubenswrapper[4929]: I1122 07:13:34.963564 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:34 crc kubenswrapper[4929]: E1122 07:13:34.963937 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:35.463921904 +0000 UTC m=+152.573375917 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.065436 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:35 crc kubenswrapper[4929]: E1122 07:13:35.065775 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:35.565750701 +0000 UTC m=+152.675204714 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.166623 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:35 crc kubenswrapper[4929]: E1122 07:13:35.166798 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:35.666773938 +0000 UTC m=+152.776227951 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.166992 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:35 crc kubenswrapper[4929]: E1122 07:13:35.167321 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:35.667310711 +0000 UTC m=+152.776764734 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.268365 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:35 crc kubenswrapper[4929]: E1122 07:13:35.268580 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:35.768520472 +0000 UTC m=+152.877974495 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.268850 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:35 crc kubenswrapper[4929]: E1122 07:13:35.269112 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:35.769100117 +0000 UTC m=+152.878554130 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.369906 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:35 crc kubenswrapper[4929]: E1122 07:13:35.370057 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:35.870030351 +0000 UTC m=+152.979484364 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.370260 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:35 crc kubenswrapper[4929]: E1122 07:13:35.370588 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:35.870580724 +0000 UTC m=+152.980034737 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.435463 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" event={"ID":"d83571fe-a17e-49ad-a121-b58f90d914d7","Type":"ContainerStarted","Data":"c5caee58303b952d85e9948752171876d01e33c2f79f7b5bcbb88df2ef1431ea"} Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.470975 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:35 crc kubenswrapper[4929]: E1122 07:13:35.471116 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:35.971086428 +0000 UTC m=+153.080540451 (durationBeforeRetry 500ms). 
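[Editor's note, not part of the captured log.] The pair of entries repeating above every ~500ms is the kubelet volume reconciler retrying UnmountVolume.TearDown (for the terminated pod 8f668bae-…) and MountVolume.MountDevice (for image-registry-697d97f7c8-f5nrw) against the same PVC; both fail with "driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers" because the CSI driver pod has not yet completed its plugin-registration handshake with the kubelet, and nestedpendingoperations re-queues each operation with a 500ms backoff (durationBeforeRetry). The ContainerStarted event for hostpath-provisioner/csi-hostpathplugin-hr5lv logged just above is the precondition for that registration, after which these retries can succeed. As a minimal sketch of one way to observe registration from the API-server side (the kubelet itself consults its in-memory plugin registry, which the CSINode object mirrors) — the kubeconfig path and the node name "crc" are assumptions for this cluster:

    // Sketch: list the CSI drivers a node has registered, via the CSINode
    // object that mirrors the kubelet's plugin registry. Assumed inputs:
    // kubeconfig path and node name "crc".
    package main

    import (
    	"context"
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/kubelet/kubeconfig")
    	if err != nil {
    		panic(err)
    	}
    	client, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		panic(err)
    	}
    	// Until kubevirt.io.hostpath-provisioner finishes registering with the
    	// kubelet it is absent from Spec.Drivers, and mounts of volumes backed
    	// by it fail exactly as in the surrounding log entries.
    	csiNode, err := client.StorageV1().CSINodes().Get(context.TODO(), "crc", metav1.GetOptions{})
    	if err != nil {
    		panic(err)
    	}
    	for _, d := range csiNode.Spec.Drivers {
    		fmt.Printf("registered CSI driver: %s\n", d.Name)
    	}
    }

The same information is visible with `kubectl get csinode crc -o yaml` under .spec.drivers. [End of note; the captured log resumes below.]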
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.471246 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:35 crc kubenswrapper[4929]: E1122 07:13:35.471692 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:35.971681353 +0000 UTC m=+153.081135436 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.572476 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:35 crc kubenswrapper[4929]: E1122 07:13:35.572700 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:36.072670209 +0000 UTC m=+153.182124222 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.572901 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:35 crc kubenswrapper[4929]: E1122 07:13:35.573281 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:36.073264843 +0000 UTC m=+153.182718926 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.648704 4929 patch_prober.go:28] interesting pod/router-default-5444994796-vp5z4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 07:13:35 crc kubenswrapper[4929]: [-]has-synced failed: reason withheld Nov 22 07:13:35 crc kubenswrapper[4929]: [+]process-running ok Nov 22 07:13:35 crc kubenswrapper[4929]: healthz check failed Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.648784 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vp5z4" podUID="5d850ad3-bdf5-479f-9ca7-da300763391c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.674551 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:35 crc kubenswrapper[4929]: E1122 07:13:35.674750 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:36.174719831 +0000 UTC m=+153.284173844 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.674947 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:35 crc kubenswrapper[4929]: E1122 07:13:35.675320 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:36.175306455 +0000 UTC m=+153.284760468 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.772714 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6gdjz"] Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.773785 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6gdjz" Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.776425 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:35 crc kubenswrapper[4929]: E1122 07:13:35.776576 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:36.276556157 +0000 UTC m=+153.386010170 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.777973 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.802678 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6gdjz"] Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.878521 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.878841 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dd2a0c6-cab8-4398-b9ce-30fc74285bfd-catalog-content\") pod \"community-operators-6gdjz\" (UID: \"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd\") " pod="openshift-marketplace/community-operators-6gdjz" Nov 22 07:13:35 crc kubenswrapper[4929]: E1122 07:13:35.878936 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:36.378918287 +0000 UTC m=+153.488372300 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.878990 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dh2mw\" (UniqueName: \"kubernetes.io/projected/3dd2a0c6-cab8-4398-b9ce-30fc74285bfd-kube-api-access-dh2mw\") pod \"community-operators-6gdjz\" (UID: \"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd\") " pod="openshift-marketplace/community-operators-6gdjz"
Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.879135 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dd2a0c6-cab8-4398-b9ce-30fc74285bfd-utilities\") pod \"community-operators-6gdjz\" (UID: \"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd\") " pod="openshift-marketplace/community-operators-6gdjz"
Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.980051 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:35 crc kubenswrapper[4929]: E1122 07:13:35.980301 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:36.480281422 +0000 UTC m=+153.589735435 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.980364 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.980398 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dd2a0c6-cab8-4398-b9ce-30fc74285bfd-catalog-content\") pod \"community-operators-6gdjz\" (UID: \"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd\") " pod="openshift-marketplace/community-operators-6gdjz"
Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.980445 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dh2mw\" (UniqueName: \"kubernetes.io/projected/3dd2a0c6-cab8-4398-b9ce-30fc74285bfd-kube-api-access-dh2mw\") pod \"community-operators-6gdjz\" (UID: \"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd\") " pod="openshift-marketplace/community-operators-6gdjz"
Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.980526 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dd2a0c6-cab8-4398-b9ce-30fc74285bfd-utilities\") pod \"community-operators-6gdjz\" (UID: \"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd\") " pod="openshift-marketplace/community-operators-6gdjz"
Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.980648 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-94k7f"]
Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.981007 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dd2a0c6-cab8-4398-b9ce-30fc74285bfd-utilities\") pod \"community-operators-6gdjz\" (UID: \"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd\") " pod="openshift-marketplace/community-operators-6gdjz"
Nov 22 07:13:35 crc kubenswrapper[4929]: E1122 07:13:35.981316 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:36.481302698 +0000 UTC m=+153.590756711 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.981616 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dd2a0c6-cab8-4398-b9ce-30fc74285bfd-catalog-content\") pod \"community-operators-6gdjz\" (UID: \"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd\") " pod="openshift-marketplace/community-operators-6gdjz"
Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.982971 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-94k7f"
Nov 22 07:13:35 crc kubenswrapper[4929]: I1122 07:13:35.986275 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.003024 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-94k7f"]
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.016125 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.016977 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.019793 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.021297 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.044818 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.081077 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.081325 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffefae1e-6d13-4fe0-bc00-d9d828551582-catalog-content\") pod \"certified-operators-94k7f\" (UID: \"ffefae1e-6d13-4fe0-bc00-d9d828551582\") " pod="openshift-marketplace/certified-operators-94k7f"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.081356 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1b4c6879-fbf4-4589-aae5-3a6719cb42b6-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"1b4c6879-fbf4-4589-aae5-3a6719cb42b6\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.081398 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftqds\" (UniqueName: \"kubernetes.io/projected/ffefae1e-6d13-4fe0-bc00-d9d828551582-kube-api-access-ftqds\") pod \"certified-operators-94k7f\" (UID: \"ffefae1e-6d13-4fe0-bc00-d9d828551582\") " pod="openshift-marketplace/certified-operators-94k7f"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.081446 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1b4c6879-fbf4-4589-aae5-3a6719cb42b6-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"1b4c6879-fbf4-4589-aae5-3a6719cb42b6\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.081496 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffefae1e-6d13-4fe0-bc00-d9d828551582-utilities\") pod \"certified-operators-94k7f\" (UID: \"ffefae1e-6d13-4fe0-bc00-d9d828551582\") " pod="openshift-marketplace/certified-operators-94k7f"
Nov 22 07:13:36 crc kubenswrapper[4929]: E1122 07:13:36.081653 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:36.581636807 +0000 UTC m=+153.691090820 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.102678 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dh2mw\" (UniqueName: \"kubernetes.io/projected/3dd2a0c6-cab8-4398-b9ce-30fc74285bfd-kube-api-access-dh2mw\") pod \"community-operators-6gdjz\" (UID: \"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd\") " pod="openshift-marketplace/community-operators-6gdjz"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.183025 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.183091 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffefae1e-6d13-4fe0-bc00-d9d828551582-utilities\") pod \"certified-operators-94k7f\" (UID: \"ffefae1e-6d13-4fe0-bc00-d9d828551582\") " pod="openshift-marketplace/certified-operators-94k7f"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.183181 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffefae1e-6d13-4fe0-bc00-d9d828551582-catalog-content\") pod \"certified-operators-94k7f\" (UID: \"ffefae1e-6d13-4fe0-bc00-d9d828551582\") " pod="openshift-marketplace/certified-operators-94k7f"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.183233 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1b4c6879-fbf4-4589-aae5-3a6719cb42b6-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"1b4c6879-fbf4-4589-aae5-3a6719cb42b6\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.183279 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftqds\" (UniqueName: \"kubernetes.io/projected/ffefae1e-6d13-4fe0-bc00-d9d828551582-kube-api-access-ftqds\") pod \"certified-operators-94k7f\" (UID: \"ffefae1e-6d13-4fe0-bc00-d9d828551582\") " pod="openshift-marketplace/certified-operators-94k7f"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.183323 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1b4c6879-fbf4-4589-aae5-3a6719cb42b6-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"1b4c6879-fbf4-4589-aae5-3a6719cb42b6\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.183713 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qntx2"]
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.190055 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qntx2"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.191573 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffefae1e-6d13-4fe0-bc00-d9d828551582-catalog-content\") pod \"certified-operators-94k7f\" (UID: \"ffefae1e-6d13-4fe0-bc00-d9d828551582\") " pod="openshift-marketplace/certified-operators-94k7f"
Nov 22 07:13:36 crc kubenswrapper[4929]: E1122 07:13:36.191810 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:36.691797551 +0000 UTC m=+153.801251564 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.192152 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffefae1e-6d13-4fe0-bc00-d9d828551582-utilities\") pod \"certified-operators-94k7f\" (UID: \"ffefae1e-6d13-4fe0-bc00-d9d828551582\") " pod="openshift-marketplace/certified-operators-94k7f"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.192562 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1b4c6879-fbf4-4589-aae5-3a6719cb42b6-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"1b4c6879-fbf4-4589-aae5-3a6719cb42b6\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.220003 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qntx2"]
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.238386 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftqds\" (UniqueName: \"kubernetes.io/projected/ffefae1e-6d13-4fe0-bc00-d9d828551582-kube-api-access-ftqds\") pod \"certified-operators-94k7f\" (UID: \"ffefae1e-6d13-4fe0-bc00-d9d828551582\") " pod="openshift-marketplace/certified-operators-94k7f"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.242303 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1b4c6879-fbf4-4589-aae5-3a6719cb42b6-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"1b4c6879-fbf4-4589-aae5-3a6719cb42b6\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.285383 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.285734 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ed8fd8e-4ded-4661-af4b-64c6e80cc613-catalog-content\") pod \"community-operators-qntx2\" (UID: \"5ed8fd8e-4ded-4661-af4b-64c6e80cc613\") " pod="openshift-marketplace/community-operators-qntx2"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.285783 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csstg\" (UniqueName: \"kubernetes.io/projected/5ed8fd8e-4ded-4661-af4b-64c6e80cc613-kube-api-access-csstg\") pod \"community-operators-qntx2\" (UID: \"5ed8fd8e-4ded-4661-af4b-64c6e80cc613\") " pod="openshift-marketplace/community-operators-qntx2"
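[Annotation: the repeating failure above is the kubelet's volume reconciler hitting an unregistered CSI driver. Every MountDevice/TearDown attempt for pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 fails with "driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers", and nestedpendingoperations gates the next attempt behind a fixed 500ms backoff, visible in the "No retries permitted until ..." timestamps. A minimal Go sketch of that pattern follows; it is an illustration assuming a simple driver map and per-operation retry state, not kubelet's actual types.]

// Hypothetical sketch: an operation that fails because a CSI driver name is
// missing from the registered-driver map may not be retried until a fixed
// delay (here 500ms) has elapsed.
package main

import (
	"errors"
	"fmt"
	"time"
)

type opState struct{ nextRetry time.Time }

// registered starts empty: the driver's node plugin has not registered yet.
var registered = map[string]bool{}

func mountDevice(driver string, states map[string]*opState) error {
	st := states[driver]
	if st != nil && time.Now().Before(st.nextRetry) {
		return fmt.Errorf("no retries permitted until %s", st.nextRetry)
	}
	if !registered[driver] {
		if st == nil {
			st = &opState{}
			states[driver] = st
		}
		st.nextRetry = time.Now().Add(500 * time.Millisecond) // durationBeforeRetry
		return errors.New("driver name " + driver + " not found in the list of registered CSI drivers")
	}
	return nil
}

func main() {
	states := map[string]*opState{}
	fmt.Println(mountDevice("kubevirt.io.hostpath-provisioner", states)) // not registered
	fmt.Println(mountDevice("kubevirt.io.hostpath-provisioner", states)) // gated by backoff
	time.Sleep(600 * time.Millisecond)
	registered["kubevirt.io.hostpath-provisioner"] = true // registration arrives
	fmt.Println(mountDevice("kubevirt.io.hostpath-provisioner", states)) // nil: succeeds
}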
"operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ed8fd8e-4ded-4661-af4b-64c6e80cc613-utilities\") pod \"community-operators-qntx2\" (UID: \"5ed8fd8e-4ded-4661-af4b-64c6e80cc613\") " pod="openshift-marketplace/community-operators-qntx2" Nov 22 07:13:36 crc kubenswrapper[4929]: E1122 07:13:36.285928 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:36.785910875 +0000 UTC m=+153.895364888 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.302366 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-94k7f" Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.359043 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.375620 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lfbtc"] Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.376775 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lfbtc" Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.383631 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lfbtc"] Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.439590 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6gdjz" Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.439855 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ed8fd8e-4ded-4661-af4b-64c6e80cc613-catalog-content\") pod \"community-operators-qntx2\" (UID: \"5ed8fd8e-4ded-4661-af4b-64c6e80cc613\") " pod="openshift-marketplace/community-operators-qntx2" Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.439932 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csstg\" (UniqueName: \"kubernetes.io/projected/5ed8fd8e-4ded-4661-af4b-64c6e80cc613-kube-api-access-csstg\") pod \"community-operators-qntx2\" (UID: \"5ed8fd8e-4ded-4661-af4b-64c6e80cc613\") " pod="openshift-marketplace/community-operators-qntx2" Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.439966 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ed8fd8e-4ded-4661-af4b-64c6e80cc613-utilities\") pod \"community-operators-qntx2\" (UID: \"5ed8fd8e-4ded-4661-af4b-64c6e80cc613\") " pod="openshift-marketplace/community-operators-qntx2" Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.440014 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:36 crc kubenswrapper[4929]: E1122 07:13:36.440480 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:36.940465585 +0000 UTC m=+154.049919598 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.440990 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ed8fd8e-4ded-4661-af4b-64c6e80cc613-utilities\") pod \"community-operators-qntx2\" (UID: \"5ed8fd8e-4ded-4661-af4b-64c6e80cc613\") " pod="openshift-marketplace/community-operators-qntx2" Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.449083 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" event={"ID":"d83571fe-a17e-49ad-a121-b58f90d914d7","Type":"ContainerStarted","Data":"e54c4456ac8853b7154769baeae2770f441460f021a0455f2254faef001e574d"} Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.472253 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csstg\" (UniqueName: \"kubernetes.io/projected/5ed8fd8e-4ded-4661-af4b-64c6e80cc613-kube-api-access-csstg\") pod \"community-operators-qntx2\" (UID: \"5ed8fd8e-4ded-4661-af4b-64c6e80cc613\") " pod="openshift-marketplace/community-operators-qntx2" Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.541092 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:36 crc kubenswrapper[4929]: E1122 07:13:36.541293 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:37.041269017 +0000 UTC m=+154.150723030 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.541387 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ddb775a-6361-405f-9ea9-63d22b9a4f79-utilities\") pod \"certified-operators-lfbtc\" (UID: \"3ddb775a-6361-405f-9ea9-63d22b9a4f79\") " pod="openshift-marketplace/certified-operators-lfbtc" Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.541549 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9x8np\" (UniqueName: \"kubernetes.io/projected/3ddb775a-6361-405f-9ea9-63d22b9a4f79-kube-api-access-9x8np\") pod \"certified-operators-lfbtc\" (UID: \"3ddb775a-6361-405f-9ea9-63d22b9a4f79\") " pod="openshift-marketplace/certified-operators-lfbtc" Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.541668 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.541829 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ddb775a-6361-405f-9ea9-63d22b9a4f79-catalog-content\") pod \"certified-operators-lfbtc\" (UID: \"3ddb775a-6361-405f-9ea9-63d22b9a4f79\") " pod="openshift-marketplace/certified-operators-lfbtc" Nov 22 07:13:36 crc kubenswrapper[4929]: E1122 07:13:36.541958 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:37.041950544 +0000 UTC m=+154.151404557 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.641042 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ed8fd8e-4ded-4661-af4b-64c6e80cc613-catalog-content\") pod \"community-operators-qntx2\" (UID: \"5ed8fd8e-4ded-4661-af4b-64c6e80cc613\") " pod="openshift-marketplace/community-operators-qntx2" Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.642770 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.643403 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ddb775a-6361-405f-9ea9-63d22b9a4f79-catalog-content\") pod \"certified-operators-lfbtc\" (UID: \"3ddb775a-6361-405f-9ea9-63d22b9a4f79\") " pod="openshift-marketplace/certified-operators-lfbtc" Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.643466 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ddb775a-6361-405f-9ea9-63d22b9a4f79-utilities\") pod \"certified-operators-lfbtc\" (UID: \"3ddb775a-6361-405f-9ea9-63d22b9a4f79\") " pod="openshift-marketplace/certified-operators-lfbtc" Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.643541 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9x8np\" (UniqueName: \"kubernetes.io/projected/3ddb775a-6361-405f-9ea9-63d22b9a4f79-kube-api-access-9x8np\") pod \"certified-operators-lfbtc\" (UID: \"3ddb775a-6361-405f-9ea9-63d22b9a4f79\") " pod="openshift-marketplace/certified-operators-lfbtc" Nov 22 07:13:36 crc kubenswrapper[4929]: E1122 07:13:36.644120 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:37.144095078 +0000 UTC m=+154.253549171 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.644574 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ddb775a-6361-405f-9ea9-63d22b9a4f79-catalog-content\") pod \"certified-operators-lfbtc\" (UID: \"3ddb775a-6361-405f-9ea9-63d22b9a4f79\") " pod="openshift-marketplace/certified-operators-lfbtc" Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.644876 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ddb775a-6361-405f-9ea9-63d22b9a4f79-utilities\") pod \"certified-operators-lfbtc\" (UID: \"3ddb775a-6361-405f-9ea9-63d22b9a4f79\") " pod="openshift-marketplace/certified-operators-lfbtc" Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.649725 4929 patch_prober.go:28] interesting pod/router-default-5444994796-vp5z4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 07:13:36 crc kubenswrapper[4929]: [-]has-synced failed: reason withheld Nov 22 07:13:36 crc kubenswrapper[4929]: [+]process-running ok Nov 22 07:13:36 crc kubenswrapper[4929]: healthz check failed Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.649777 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vp5z4" podUID="5d850ad3-bdf5-479f-9ca7-da300763391c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.673658 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9x8np\" (UniqueName: \"kubernetes.io/projected/3ddb775a-6361-405f-9ea9-63d22b9a4f79-kube-api-access-9x8np\") pod \"certified-operators-lfbtc\" (UID: \"3ddb775a-6361-405f-9ea9-63d22b9a4f79\") " pod="openshift-marketplace/certified-operators-lfbtc" Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.744771 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:36 crc kubenswrapper[4929]: E1122 07:13:36.745096 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:37.245080134 +0000 UTC m=+154.354534147 (durationBeforeRetry 500ms). 
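[Annotation: the router-default startup probe failures interleaved above follow the kubelet's HTTP probe semantics: the prober GETs the container's health endpoint, treats a status code of 400 or higher (or a connection error) as a failure, and logs the start of the response body, here the [-]backend-http / [-]has-synced sub-check lines. A minimal sketch of that check follows, assuming an illustrative URL and timeout.]

// Hypothetical sketch of an HTTP startup-probe check as logged above.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func probe(url string) error {
	client := &http.Client{Timeout: 1 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "connect: connection refused", as for the console pod later
	}
	defer resp.Body.Close()
	// The kubelet treats 2xx/3xx as success and >= 400 as failure.
	if resp.StatusCode >= 400 {
		return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
	}
	return nil
}

func main() {
	// URL is illustrative only, not the router's actual probe endpoint.
	if err := probe("http://127.0.0.1:1936/healthz/ready"); err != nil {
		fmt.Println("Probe failed:", err)
	}
}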
Nov 22 07:13:36 crc kubenswrapper[4929]: E1122 07:13:36.745096 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:37.245080134 +0000 UTC m=+154.354534147 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.756706 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6gdjz"]
Nov 22 07:13:36 crc kubenswrapper[4929]: W1122 07:13:36.762167 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dd2a0c6_cab8_4398_b9ce_30fc74285bfd.slice/crio-ee2fd9339c5d6a80c1cb17504356f00923c61dd4de6fc2ae2ee82f4230ccc09d WatchSource:0}: Error finding container ee2fd9339c5d6a80c1cb17504356f00923c61dd4de6fc2ae2ee82f4230ccc09d: Status 404 returned error can't find the container with id ee2fd9339c5d6a80c1cb17504356f00923c61dd4de6fc2ae2ee82f4230ccc09d
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.763094 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lfbtc"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.811052 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qntx2"
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.845849 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:36 crc kubenswrapper[4929]: E1122 07:13:36.846030 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:37.346005218 +0000 UTC m=+154.455459231 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.846109 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:36 crc kubenswrapper[4929]: E1122 07:13:36.846570 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:37.346555541 +0000 UTC m=+154.456009554 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.853778 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-94k7f"]
Nov 22 07:13:36 crc kubenswrapper[4929]: W1122 07:13:36.865255 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podffefae1e_6d13_4fe0_bc00_d9d828551582.slice/crio-f054230528b8bb316e974fb6f0faef66597bfdcbd7d5c2e5f5de3ca1514de9b9 WatchSource:0}: Error finding container f054230528b8bb316e974fb6f0faef66597bfdcbd7d5c2e5f5de3ca1514de9b9: Status 404 returned error can't find the container with id f054230528b8bb316e974fb6f0faef66597bfdcbd7d5c2e5f5de3ca1514de9b9
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.891445 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.947284 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:36 crc kubenswrapper[4929]: E1122 07:13:36.947460 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:37.447434184 +0000 UTC m=+154.556888197 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.947732 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:36 crc kubenswrapper[4929]: E1122 07:13:36.948008 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:37.448000388 +0000 UTC m=+154.557454401 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:36 crc kubenswrapper[4929]: I1122 07:13:36.956938 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lfbtc"]
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.030996 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qntx2"]
Nov 22 07:13:37 crc kubenswrapper[4929]: W1122 07:13:37.042416 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ed8fd8e_4ded_4661_af4b_64c6e80cc613.slice/crio-d3c47adaee973620e17c1ded56cf2a79d7e41ef98aa81d657f4590fda1cca055 WatchSource:0}: Error finding container d3c47adaee973620e17c1ded56cf2a79d7e41ef98aa81d657f4590fda1cca055: Status 404 returned error can't find the container with id d3c47adaee973620e17c1ded56cf2a79d7e41ef98aa81d657f4590fda1cca055
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.048628 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:37 crc kubenswrapper[4929]: E1122 07:13:37.052317 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:37.552298886 +0000 UTC m=+154.661752889 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.153628 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:37 crc kubenswrapper[4929]: E1122 07:13:37.153957 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:37.653945319 +0000 UTC m=+154.763399332 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.255357 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:37 crc kubenswrapper[4929]: E1122 07:13:37.255605 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:37.7555851 +0000 UTC m=+154.865039113 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.356507 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:37 crc kubenswrapper[4929]: E1122 07:13:37.357087 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:37.857061438 +0000 UTC m=+154.966515451 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.387483 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-4ww8c"
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.430967 4929 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.454630 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"1b4c6879-fbf4-4589-aae5-3a6719cb42b6","Type":"ContainerStarted","Data":"0c5bb7be27ae3e4a9fe36920e4369e3240988c10b9ba8a04370ce98709016c18"}
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.455571 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94k7f" event={"ID":"ffefae1e-6d13-4fe0-bc00-d9d828551582","Type":"ContainerStarted","Data":"f054230528b8bb316e974fb6f0faef66597bfdcbd7d5c2e5f5de3ca1514de9b9"}
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.456438 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lfbtc" event={"ID":"3ddb775a-6361-405f-9ea9-63d22b9a4f79","Type":"ContainerStarted","Data":"01cb4a31e50292f487df77109fc4247fa2e4d4d661fc3c7abe168d8c3ac78829"}
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.457102 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
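[Annotation: the plugin_watcher entry at 07:13:37.430967 is the turning point for the volume errors above. The hostpath provisioner's node plugin has surfaced its registration socket under /var/lib/kubelet/plugins_registry, which is how the kubelet discovers CSI drivers; once registration completes, the driver-name lookup that has been failing can succeed on a later retry. A small diagnostic sketch that lists such registration sockets follows; the directory is the standard plugins_registry path, while the "-reg.sock" name-trimming heuristic is an assumption for illustration.]

// Hypothetical diagnostic: list the CSI registration sockets a kubelet's
// plugin watcher would discover in its plugins_registry directory.
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	const dir = "/var/lib/kubelet/plugins_registry"
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Println("cannot read", dir, ":", err)
		return
	}
	for _, e := range entries {
		// e.g. kubevirt.io.hostpath-provisioner-reg.sock, as logged above.
		if strings.HasSuffix(e.Name(), "-reg.sock") {
			fmt.Println("registration socket for driver:", strings.TrimSuffix(e.Name(), "-reg.sock"))
		}
	}
}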
Nov 22 07:13:37 crc kubenswrapper[4929]: E1122 07:13:37.457265 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:37.957245203 +0000 UTC m=+155.066699216 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.457382 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:37 crc kubenswrapper[4929]: E1122 07:13:37.457705 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:37.957693354 +0000 UTC m=+155.067147367 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.458469 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qntx2" event={"ID":"5ed8fd8e-4ded-4661-af4b-64c6e80cc613","Type":"ContainerStarted","Data":"d3c47adaee973620e17c1ded56cf2a79d7e41ef98aa81d657f4590fda1cca055"}
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.459801 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gdjz" event={"ID":"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd","Type":"ContainerStarted","Data":"c11ee559c0fd9c818c16bcd10580fd97f2d2a0d091d22a40f5fe9ae2b91860c4"}
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.459830 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gdjz" event={"ID":"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd","Type":"ContainerStarted","Data":"ee2fd9339c5d6a80c1cb17504356f00923c61dd4de6fc2ae2ee82f4230ccc09d"}
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.558748 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:37 crc kubenswrapper[4929]: E1122 07:13:37.558958 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:38.058932146 +0000 UTC m=+155.168386159 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.559058 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:37 crc kubenswrapper[4929]: E1122 07:13:37.559405 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:38.059397868 +0000 UTC m=+155.168851881 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.647910 4929 patch_prober.go:28] interesting pod/router-default-5444994796-vp5z4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 07:13:37 crc kubenswrapper[4929]: [-]has-synced failed: reason withheld
Nov 22 07:13:37 crc kubenswrapper[4929]: [+]process-running ok
Nov 22 07:13:37 crc kubenswrapper[4929]: healthz check failed
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.647996 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vp5z4" podUID="5d850ad3-bdf5-479f-9ca7-da300763391c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.656196 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj"
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.659926 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:37 crc kubenswrapper[4929]: E1122 07:13:37.660117 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:38.160089836 +0000 UTC m=+155.269543849 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.660242 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Nov 22 07:13:37 crc kubenswrapper[4929]: E1122 07:13:37.660605 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:38.160594818 +0000 UTC m=+155.270048891 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.713725 4929 patch_prober.go:28] interesting pod/apiserver-76f77b778f-nnnk9 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Nov 22 07:13:37 crc kubenswrapper[4929]: [+]log ok
Nov 22 07:13:37 crc kubenswrapper[4929]: [+]etcd ok
Nov 22 07:13:37 crc kubenswrapper[4929]: [+]poststarthook/start-apiserver-admission-initializer ok
Nov 22 07:13:37 crc kubenswrapper[4929]: [+]poststarthook/generic-apiserver-start-informers ok
Nov 22 07:13:37 crc kubenswrapper[4929]: [+]poststarthook/max-in-flight-filter ok
Nov 22 07:13:37 crc kubenswrapper[4929]: [+]poststarthook/storage-object-count-tracker-hook ok
Nov 22 07:13:37 crc kubenswrapper[4929]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Nov 22 07:13:37 crc kubenswrapper[4929]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Nov 22 07:13:37 crc kubenswrapper[4929]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Nov 22 07:13:37 crc kubenswrapper[4929]: [+]poststarthook/project.openshift.io-projectcache ok
Nov 22 07:13:37 crc kubenswrapper[4929]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Nov 22 07:13:37 crc kubenswrapper[4929]: [+]poststarthook/openshift.io-startinformers ok
Nov 22 07:13:37 crc kubenswrapper[4929]: [+]poststarthook/openshift.io-restmapperupdater ok
Nov 22 07:13:37 crc kubenswrapper[4929]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Nov 22 07:13:37 crc kubenswrapper[4929]: livez check failed
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.713809 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" podUID="a410f03e-218e-4646-9b41-17a32af9330d" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.754299 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n"
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.761089 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 07:13:37 crc kubenswrapper[4929]: E1122 07:13:37.761500 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:38.261467501 +0000 UTC m=+155.370921514 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.761565 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw"
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.762992 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-wmd4n" Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.797990 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-b7hfk" Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.809158 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-tqwhs" Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.810781 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-tqwhs" Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.812483 4929 patch_prober.go:28] interesting pod/console-f9d7485db-tqwhs container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.12:8443/health\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.812526 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-tqwhs" podUID="de2ba535-d661-453d-b4cd-19c6e7628b0c" containerName="console" probeResult="failure" output="Get \"https://10.217.0.12:8443/health\": dial tcp 10.217.0.12:8443: connect: connection refused" Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.863100 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:37 crc kubenswrapper[4929]: E1122 07:13:37.863302 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:38.363275617 +0000 UTC m=+155.472729630 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.863414 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:37 crc kubenswrapper[4929]: E1122 07:13:37.863886 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:38.363868182 +0000 UTC m=+155.473322255 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.944220 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d4gn6" Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.954147 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4ddw7"] Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.955324 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4ddw7" Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.957034 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.960654 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nbrph" Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.963032 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ddw7"] Nov 22 07:13:37 crc kubenswrapper[4929]: I1122 07:13:37.965104 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:37 crc kubenswrapper[4929]: E1122 07:13:37.966470 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-22 07:13:38.466447707 +0000 UTC m=+155.575901730 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.066569 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/397a142d-10a3-47ad-a662-b8effcccd19d-catalog-content\") pod \"redhat-marketplace-4ddw7\" (UID: \"397a142d-10a3-47ad-a662-b8effcccd19d\") " pod="openshift-marketplace/redhat-marketplace-4ddw7" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.066621 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.066694 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dzh6\" (UniqueName: \"kubernetes.io/projected/397a142d-10a3-47ad-a662-b8effcccd19d-kube-api-access-4dzh6\") pod \"redhat-marketplace-4ddw7\" (UID: \"397a142d-10a3-47ad-a662-b8effcccd19d\") " pod="openshift-marketplace/redhat-marketplace-4ddw7" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.066740 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/397a142d-10a3-47ad-a662-b8effcccd19d-utilities\") pod \"redhat-marketplace-4ddw7\" (UID: \"397a142d-10a3-47ad-a662-b8effcccd19d\") " pod="openshift-marketplace/redhat-marketplace-4ddw7" Nov 22 07:13:38 crc kubenswrapper[4929]: E1122 07:13:38.067925 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:38.567909595 +0000 UTC m=+155.677363608 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.167369 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:38 crc kubenswrapper[4929]: E1122 07:13:38.167563 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:38.667537246 +0000 UTC m=+155.776991259 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.167664 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/397a142d-10a3-47ad-a662-b8effcccd19d-catalog-content\") pod \"redhat-marketplace-4ddw7\" (UID: \"397a142d-10a3-47ad-a662-b8effcccd19d\") " pod="openshift-marketplace/redhat-marketplace-4ddw7" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.167735 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.167838 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dzh6\" (UniqueName: \"kubernetes.io/projected/397a142d-10a3-47ad-a662-b8effcccd19d-kube-api-access-4dzh6\") pod \"redhat-marketplace-4ddw7\" (UID: \"397a142d-10a3-47ad-a662-b8effcccd19d\") " pod="openshift-marketplace/redhat-marketplace-4ddw7" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.167935 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/397a142d-10a3-47ad-a662-b8effcccd19d-utilities\") pod \"redhat-marketplace-4ddw7\" (UID: \"397a142d-10a3-47ad-a662-b8effcccd19d\") " pod="openshift-marketplace/redhat-marketplace-4ddw7" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.168394 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/397a142d-10a3-47ad-a662-b8effcccd19d-utilities\") pod \"redhat-marketplace-4ddw7\" (UID: \"397a142d-10a3-47ad-a662-b8effcccd19d\") " pod="openshift-marketplace/redhat-marketplace-4ddw7" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.168606 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/397a142d-10a3-47ad-a662-b8effcccd19d-catalog-content\") pod \"redhat-marketplace-4ddw7\" (UID: \"397a142d-10a3-47ad-a662-b8effcccd19d\") " pod="openshift-marketplace/redhat-marketplace-4ddw7" Nov 22 07:13:38 crc kubenswrapper[4929]: E1122 07:13:38.168846 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:38.668839509 +0000 UTC m=+155.778293522 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.188551 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dzh6\" (UniqueName: \"kubernetes.io/projected/397a142d-10a3-47ad-a662-b8effcccd19d-kube-api-access-4dzh6\") pod \"redhat-marketplace-4ddw7\" (UID: \"397a142d-10a3-47ad-a662-b8effcccd19d\") " pod="openshift-marketplace/redhat-marketplace-4ddw7" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.269274 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:38 crc kubenswrapper[4929]: E1122 07:13:38.269478 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 07:13:38.769452115 +0000 UTC m=+155.878906128 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.269679 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:38 crc kubenswrapper[4929]: E1122 07:13:38.270053 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 07:13:38.770009309 +0000 UTC m=+155.879463322 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-f5nrw" (UID: "6776df8d-1529-41a1-9474-d368b6631779") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.271090 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.274544 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.285118 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4ddw7" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.299166 4929 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-22T07:13:37.431004109Z","Handler":null,"Name":""} Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.303618 4929 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.303657 4929 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.351011 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-p6btf"] Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.352250 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p6btf" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.370580 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.388415 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.396298 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.398256 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p6btf"] Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.399770 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.404640 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.468190 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-2zrch" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.470828 4929 generic.go:334] "Generic (PLEG): container finished" podID="a95c39bb-f2ae-4a5c-897f-1ac3a476c436" containerID="1070fcba26aa714e79a73ce3e21951a05405b60a3a175241606025bd3a9317f8" exitCode=0 Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.470865 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm" event={"ID":"a95c39bb-f2ae-4a5c-897f-1ac3a476c436","Type":"ContainerDied","Data":"1070fcba26aa714e79a73ce3e21951a05405b60a3a175241606025bd3a9317f8"} Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.472079 4929 generic.go:334] "Generic (PLEG): container finished" podID="ffefae1e-6d13-4fe0-bc00-d9d828551582" containerID="fb07a45f5b7ed348cfbd6c5630c823c0a53f29e76987c91c302202301347d240" exitCode=0 Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.472108 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94k7f" event={"ID":"ffefae1e-6d13-4fe0-bc00-d9d828551582","Type":"ContainerDied","Data":"fb07a45f5b7ed348cfbd6c5630c823c0a53f29e76987c91c302202301347d240"} Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.474104 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.474130 4929 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.474554 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.474571 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.474795 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.474809 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.476437 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce22e4ba-966e-46dc-a50f-cc3905f3da7c-catalog-content\") pod \"redhat-marketplace-p6btf\" (UID: \"ce22e4ba-966e-46dc-a50f-cc3905f3da7c\") " pod="openshift-marketplace/redhat-marketplace-p6btf" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.476500 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.476535 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce22e4ba-966e-46dc-a50f-cc3905f3da7c-utilities\") pod \"redhat-marketplace-p6btf\" (UID: \"ce22e4ba-966e-46dc-a50f-cc3905f3da7c\") " pod="openshift-marketplace/redhat-marketplace-p6btf" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.476560 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqz6b\" (UniqueName: \"kubernetes.io/projected/ce22e4ba-966e-46dc-a50f-cc3905f3da7c-kube-api-access-sqz6b\") pod \"redhat-marketplace-p6btf\" (UID: \"ce22e4ba-966e-46dc-a50f-cc3905f3da7c\") " pod="openshift-marketplace/redhat-marketplace-p6btf" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.477524 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" 
event={"ID":"d83571fe-a17e-49ad-a121-b58f90d914d7","Type":"ContainerStarted","Data":"e3e9fbcd78f954f4ca90123af57cb565569a612192d625e289c154b1b915523b"} Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.479692 4929 generic.go:334] "Generic (PLEG): container finished" podID="3ddb775a-6361-405f-9ea9-63d22b9a4f79" containerID="da6fab27830d7b68ede35961a7db7e9fdac67bb297894daf3b04d21e1ce0c311" exitCode=0 Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.479750 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lfbtc" event={"ID":"3ddb775a-6361-405f-9ea9-63d22b9a4f79","Type":"ContainerDied","Data":"da6fab27830d7b68ede35961a7db7e9fdac67bb297894daf3b04d21e1ce0c311"} Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.495970 4929 generic.go:334] "Generic (PLEG): container finished" podID="5ed8fd8e-4ded-4661-af4b-64c6e80cc613" containerID="e386662a3a5f30a08dba3fee59479ee212a49ed78e90fe894e6e29f820e52fc4" exitCode=0 Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.496019 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qntx2" event={"ID":"5ed8fd8e-4ded-4661-af4b-64c6e80cc613","Type":"ContainerDied","Data":"e386662a3a5f30a08dba3fee59479ee212a49ed78e90fe894e6e29f820e52fc4"} Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.501033 4929 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.501084 4929 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.506446 4929 generic.go:334] "Generic (PLEG): container finished" podID="3dd2a0c6-cab8-4398-b9ce-30fc74285bfd" containerID="c11ee559c0fd9c818c16bcd10580fd97f2d2a0d091d22a40f5fe9ae2b91860c4" exitCode=0 Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.506666 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gdjz" event={"ID":"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd","Type":"ContainerDied","Data":"c11ee559c0fd9c818c16bcd10580fd97f2d2a0d091d22a40f5fe9ae2b91860c4"} Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.509716 4929 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.513882 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"1b4c6879-fbf4-4589-aae5-3a6719cb42b6","Type":"ContainerStarted","Data":"d50dbd16d6aed67b0573f91e86c0d4c18a5b97ca99fa2dc02b11717c431579eb"} Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.518068 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-hr5lv" podStartSLOduration=13.518049178 podStartE2EDuration="13.518049178s" podCreationTimestamp="2025-11-22 07:13:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 
UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:38.513564616 +0000 UTC m=+155.623018649" watchObservedRunningTime="2025-11-22 07:13:38.518049178 +0000 UTC m=+155.627503191" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.552395 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.553029 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.555837 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.559176 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.575744 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.577523 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqz6b\" (UniqueName: \"kubernetes.io/projected/ce22e4ba-966e-46dc-a50f-cc3905f3da7c-kube-api-access-sqz6b\") pod \"redhat-marketplace-p6btf\" (UID: \"ce22e4ba-966e-46dc-a50f-cc3905f3da7c\") " pod="openshift-marketplace/redhat-marketplace-p6btf" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.578130 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce22e4ba-966e-46dc-a50f-cc3905f3da7c-catalog-content\") pod \"redhat-marketplace-p6btf\" (UID: \"ce22e4ba-966e-46dc-a50f-cc3905f3da7c\") " pod="openshift-marketplace/redhat-marketplace-p6btf" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.578226 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce22e4ba-966e-46dc-a50f-cc3905f3da7c-utilities\") pod \"redhat-marketplace-p6btf\" (UID: \"ce22e4ba-966e-46dc-a50f-cc3905f3da7c\") " pod="openshift-marketplace/redhat-marketplace-p6btf" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.581156 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce22e4ba-966e-46dc-a50f-cc3905f3da7c-utilities\") pod \"redhat-marketplace-p6btf\" (UID: \"ce22e4ba-966e-46dc-a50f-cc3905f3da7c\") " pod="openshift-marketplace/redhat-marketplace-p6btf" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.581231 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce22e4ba-966e-46dc-a50f-cc3905f3da7c-catalog-content\") pod \"redhat-marketplace-p6btf\" (UID: \"ce22e4ba-966e-46dc-a50f-cc3905f3da7c\") " pod="openshift-marketplace/redhat-marketplace-p6btf" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.620119 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqz6b\" (UniqueName: \"kubernetes.io/projected/ce22e4ba-966e-46dc-a50f-cc3905f3da7c-kube-api-access-sqz6b\") pod \"redhat-marketplace-p6btf\" (UID: \"ce22e4ba-966e-46dc-a50f-cc3905f3da7c\") " pod="openshift-marketplace/redhat-marketplace-p6btf" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.645819 4929 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-vp5z4" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.648551 4929 patch_prober.go:28] interesting pod/router-default-5444994796-vp5z4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 07:13:38 crc kubenswrapper[4929]: [-]has-synced failed: reason withheld Nov 22 07:13:38 crc kubenswrapper[4929]: [+]process-running ok Nov 22 07:13:38 crc kubenswrapper[4929]: healthz check failed Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.648600 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vp5z4" podUID="5d850ad3-bdf5-479f-9ca7-da300763391c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.665778 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-f5nrw\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") " pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.666847 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p6btf" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.679341 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.679460 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.780432 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.780863 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.780948 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 07:13:38 crc 
kubenswrapper[4929]: I1122 07:13:38.791530 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ddw7"] Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.802385 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.872104 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p6btf"] Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.877258 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.954036 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tlqcg"] Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.955317 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tlqcg" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.956858 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.962186 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:38 crc kubenswrapper[4929]: I1122 07:13:38.965347 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tlqcg"] Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.084311 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e8c08c4-b36e-49ee-bdfe-003d398a267e-utilities\") pod \"redhat-operators-tlqcg\" (UID: \"6e8c08c4-b36e-49ee-bdfe-003d398a267e\") " pod="openshift-marketplace/redhat-operators-tlqcg" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.084375 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e8c08c4-b36e-49ee-bdfe-003d398a267e-catalog-content\") pod \"redhat-operators-tlqcg\" (UID: \"6e8c08c4-b36e-49ee-bdfe-003d398a267e\") " pod="openshift-marketplace/redhat-operators-tlqcg" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.084438 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94hdq\" (UniqueName: \"kubernetes.io/projected/6e8c08c4-b36e-49ee-bdfe-003d398a267e-kube-api-access-94hdq\") pod \"redhat-operators-tlqcg\" (UID: \"6e8c08c4-b36e-49ee-bdfe-003d398a267e\") " pod="openshift-marketplace/redhat-operators-tlqcg" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.186330 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e8c08c4-b36e-49ee-bdfe-003d398a267e-utilities\") pod \"redhat-operators-tlqcg\" (UID: \"6e8c08c4-b36e-49ee-bdfe-003d398a267e\") " pod="openshift-marketplace/redhat-operators-tlqcg" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.186788 4929 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e8c08c4-b36e-49ee-bdfe-003d398a267e-catalog-content\") pod \"redhat-operators-tlqcg\" (UID: \"6e8c08c4-b36e-49ee-bdfe-003d398a267e\") " pod="openshift-marketplace/redhat-operators-tlqcg" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.186921 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94hdq\" (UniqueName: \"kubernetes.io/projected/6e8c08c4-b36e-49ee-bdfe-003d398a267e-kube-api-access-94hdq\") pod \"redhat-operators-tlqcg\" (UID: \"6e8c08c4-b36e-49ee-bdfe-003d398a267e\") " pod="openshift-marketplace/redhat-operators-tlqcg" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.188850 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e8c08c4-b36e-49ee-bdfe-003d398a267e-utilities\") pod \"redhat-operators-tlqcg\" (UID: \"6e8c08c4-b36e-49ee-bdfe-003d398a267e\") " pod="openshift-marketplace/redhat-operators-tlqcg" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.188936 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e8c08c4-b36e-49ee-bdfe-003d398a267e-catalog-content\") pod \"redhat-operators-tlqcg\" (UID: \"6e8c08c4-b36e-49ee-bdfe-003d398a267e\") " pod="openshift-marketplace/redhat-operators-tlqcg" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.206972 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-94hdq\" (UniqueName: \"kubernetes.io/projected/6e8c08c4-b36e-49ee-bdfe-003d398a267e-kube-api-access-94hdq\") pod \"redhat-operators-tlqcg\" (UID: \"6e8c08c4-b36e-49ee-bdfe-003d398a267e\") " pod="openshift-marketplace/redhat-operators-tlqcg" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.268442 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tlqcg" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.339784 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 22 07:13:39 crc kubenswrapper[4929]: W1122 07:13:39.377145 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podeea1e8a3_ccaa_4736_8dbb_e74280f5b5fa.slice/crio-fbcaf610618b4b8a1444a2c6367b3e8af437e96938e130cc93e882a9b749e46a WatchSource:0}: Error finding container fbcaf610618b4b8a1444a2c6367b3e8af437e96938e130cc93e882a9b749e46a: Status 404 returned error can't find the container with id fbcaf610618b4b8a1444a2c6367b3e8af437e96938e130cc93e882a9b749e46a Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.384507 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bjwv6"] Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.397691 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bjwv6" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.410395 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bjwv6"] Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.447675 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-f5nrw"] Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.501298 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68254869-b4b2-4c18-980e-4c1ce48555fe-utilities\") pod \"redhat-operators-bjwv6\" (UID: \"68254869-b4b2-4c18-980e-4c1ce48555fe\") " pod="openshift-marketplace/redhat-operators-bjwv6" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.501388 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbkrt\" (UniqueName: \"kubernetes.io/projected/68254869-b4b2-4c18-980e-4c1ce48555fe-kube-api-access-lbkrt\") pod \"redhat-operators-bjwv6\" (UID: \"68254869-b4b2-4c18-980e-4c1ce48555fe\") " pod="openshift-marketplace/redhat-operators-bjwv6" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.501417 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68254869-b4b2-4c18-980e-4c1ce48555fe-catalog-content\") pod \"redhat-operators-bjwv6\" (UID: \"68254869-b4b2-4c18-980e-4c1ce48555fe\") " pod="openshift-marketplace/redhat-operators-bjwv6" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.510729 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tlqcg"] Nov 22 07:13:39 crc kubenswrapper[4929]: W1122 07:13:39.523774 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6e8c08c4_b36e_49ee_bdfe_003d398a267e.slice/crio-f632d41e546b7d200ee434d04bc14c734d39b38be85b450f3623b9ff5babafc5 WatchSource:0}: Error finding container f632d41e546b7d200ee434d04bc14c734d39b38be85b450f3623b9ff5babafc5: Status 404 returned error can't find the container with id f632d41e546b7d200ee434d04bc14c734d39b38be85b450f3623b9ff5babafc5 Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.531232 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa","Type":"ContainerStarted","Data":"fbcaf610618b4b8a1444a2c6367b3e8af437e96938e130cc93e882a9b749e46a"} Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.533366 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p6btf" event={"ID":"ce22e4ba-966e-46dc-a50f-cc3905f3da7c","Type":"ContainerStarted","Data":"f7c900699fc1e32fe08bd4386b16614ca2c26a641fa06d585516915745b01a16"} Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.535364 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ddw7" event={"ID":"397a142d-10a3-47ad-a662-b8effcccd19d","Type":"ContainerStarted","Data":"82c3fcccb78a28ca52d1eedd54dd62f55506e6ed9641adc23a4aad83bf79db34"} Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.536867 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" 
event={"ID":"6776df8d-1529-41a1-9474-d368b6631779","Type":"ContainerStarted","Data":"65382d8278e021cb0fe9453294bb0a4f0de3bd396ccbb4ea0ea3fe94d04bb6b2"} Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.562368 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=4.562350512 podStartE2EDuration="4.562350512s" podCreationTimestamp="2025-11-22 07:13:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:39.55989389 +0000 UTC m=+156.669347913" watchObservedRunningTime="2025-11-22 07:13:39.562350512 +0000 UTC m=+156.671804525" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.608570 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68254869-b4b2-4c18-980e-4c1ce48555fe-utilities\") pod \"redhat-operators-bjwv6\" (UID: \"68254869-b4b2-4c18-980e-4c1ce48555fe\") " pod="openshift-marketplace/redhat-operators-bjwv6" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.608678 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbkrt\" (UniqueName: \"kubernetes.io/projected/68254869-b4b2-4c18-980e-4c1ce48555fe-kube-api-access-lbkrt\") pod \"redhat-operators-bjwv6\" (UID: \"68254869-b4b2-4c18-980e-4c1ce48555fe\") " pod="openshift-marketplace/redhat-operators-bjwv6" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.608728 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68254869-b4b2-4c18-980e-4c1ce48555fe-catalog-content\") pod \"redhat-operators-bjwv6\" (UID: \"68254869-b4b2-4c18-980e-4c1ce48555fe\") " pod="openshift-marketplace/redhat-operators-bjwv6" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.609540 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68254869-b4b2-4c18-980e-4c1ce48555fe-utilities\") pod \"redhat-operators-bjwv6\" (UID: \"68254869-b4b2-4c18-980e-4c1ce48555fe\") " pod="openshift-marketplace/redhat-operators-bjwv6" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.613549 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68254869-b4b2-4c18-980e-4c1ce48555fe-catalog-content\") pod \"redhat-operators-bjwv6\" (UID: \"68254869-b4b2-4c18-980e-4c1ce48555fe\") " pod="openshift-marketplace/redhat-operators-bjwv6" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.632098 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbkrt\" (UniqueName: \"kubernetes.io/projected/68254869-b4b2-4c18-980e-4c1ce48555fe-kube-api-access-lbkrt\") pod \"redhat-operators-bjwv6\" (UID: \"68254869-b4b2-4c18-980e-4c1ce48555fe\") " pod="openshift-marketplace/redhat-operators-bjwv6" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.648396 4929 patch_prober.go:28] interesting pod/router-default-5444994796-vp5z4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 07:13:39 crc kubenswrapper[4929]: [-]has-synced failed: reason withheld Nov 22 07:13:39 crc kubenswrapper[4929]: [+]process-running ok Nov 22 07:13:39 crc kubenswrapper[4929]: 
healthz check failed Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.648453 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vp5z4" podUID="5d850ad3-bdf5-479f-9ca7-da300763391c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.711327 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bjwv6" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.804686 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.882169 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bjwv6"] Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.912989 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a95c39bb-f2ae-4a5c-897f-1ac3a476c436-config-volume\") pod \"a95c39bb-f2ae-4a5c-897f-1ac3a476c436\" (UID: \"a95c39bb-f2ae-4a5c-897f-1ac3a476c436\") " Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.913080 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-krxdw\" (UniqueName: \"kubernetes.io/projected/a95c39bb-f2ae-4a5c-897f-1ac3a476c436-kube-api-access-krxdw\") pod \"a95c39bb-f2ae-4a5c-897f-1ac3a476c436\" (UID: \"a95c39bb-f2ae-4a5c-897f-1ac3a476c436\") " Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.913203 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a95c39bb-f2ae-4a5c-897f-1ac3a476c436-secret-volume\") pod \"a95c39bb-f2ae-4a5c-897f-1ac3a476c436\" (UID: \"a95c39bb-f2ae-4a5c-897f-1ac3a476c436\") " Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.919423 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a95c39bb-f2ae-4a5c-897f-1ac3a476c436-config-volume" (OuterVolumeSpecName: "config-volume") pod "a95c39bb-f2ae-4a5c-897f-1ac3a476c436" (UID: "a95c39bb-f2ae-4a5c-897f-1ac3a476c436"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.921834 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a95c39bb-f2ae-4a5c-897f-1ac3a476c436-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a95c39bb-f2ae-4a5c-897f-1ac3a476c436" (UID: "a95c39bb-f2ae-4a5c-897f-1ac3a476c436"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.922876 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a95c39bb-f2ae-4a5c-897f-1ac3a476c436-kube-api-access-krxdw" (OuterVolumeSpecName: "kube-api-access-krxdw") pod "a95c39bb-f2ae-4a5c-897f-1ac3a476c436" (UID: "a95c39bb-f2ae-4a5c-897f-1ac3a476c436"). InnerVolumeSpecName "kube-api-access-krxdw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:13:39 crc kubenswrapper[4929]: I1122 07:13:39.964078 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 22 07:13:40 crc kubenswrapper[4929]: I1122 07:13:40.015167 4929 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a95c39bb-f2ae-4a5c-897f-1ac3a476c436-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 07:13:40 crc kubenswrapper[4929]: I1122 07:13:40.015197 4929 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a95c39bb-f2ae-4a5c-897f-1ac3a476c436-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 07:13:40 crc kubenswrapper[4929]: I1122 07:13:40.015219 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-krxdw\" (UniqueName: \"kubernetes.io/projected/a95c39bb-f2ae-4a5c-897f-1ac3a476c436-kube-api-access-krxdw\") on node \"crc\" DevicePath \"\"" Nov 22 07:13:40 crc kubenswrapper[4929]: I1122 07:13:40.543504 4929 generic.go:334] "Generic (PLEG): container finished" podID="ce22e4ba-966e-46dc-a50f-cc3905f3da7c" containerID="bdb6dc678698fbabc91a694817ec1859cf64e8a27702729eaa4c10079aeab1f5" exitCode=0 Nov 22 07:13:40 crc kubenswrapper[4929]: I1122 07:13:40.543576 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p6btf" event={"ID":"ce22e4ba-966e-46dc-a50f-cc3905f3da7c","Type":"ContainerDied","Data":"bdb6dc678698fbabc91a694817ec1859cf64e8a27702729eaa4c10079aeab1f5"} Nov 22 07:13:40 crc kubenswrapper[4929]: I1122 07:13:40.545114 4929 generic.go:334] "Generic (PLEG): container finished" podID="1b4c6879-fbf4-4589-aae5-3a6719cb42b6" containerID="d50dbd16d6aed67b0573f91e86c0d4c18a5b97ca99fa2dc02b11717c431579eb" exitCode=0 Nov 22 07:13:40 crc kubenswrapper[4929]: I1122 07:13:40.545176 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"1b4c6879-fbf4-4589-aae5-3a6719cb42b6","Type":"ContainerDied","Data":"d50dbd16d6aed67b0573f91e86c0d4c18a5b97ca99fa2dc02b11717c431579eb"} Nov 22 07:13:40 crc kubenswrapper[4929]: I1122 07:13:40.548263 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm" Nov 22 07:13:40 crc kubenswrapper[4929]: I1122 07:13:40.548270 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm" event={"ID":"a95c39bb-f2ae-4a5c-897f-1ac3a476c436","Type":"ContainerDied","Data":"2aed47b0acb0806c373ea09651fd427ce95dcb8f61e160bfd08ec540c4ffd49a"} Nov 22 07:13:40 crc kubenswrapper[4929]: I1122 07:13:40.548309 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2aed47b0acb0806c373ea09651fd427ce95dcb8f61e160bfd08ec540c4ffd49a" Nov 22 07:13:40 crc kubenswrapper[4929]: I1122 07:13:40.560641 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bjwv6" event={"ID":"68254869-b4b2-4c18-980e-4c1ce48555fe","Type":"ContainerStarted","Data":"2cfbadde9c89afc3b12a408bca3a0653cf1e69d4e613922e7f9014682f7e770e"} Nov 22 07:13:40 crc kubenswrapper[4929]: I1122 07:13:40.561814 4929 generic.go:334] "Generic (PLEG): container finished" podID="397a142d-10a3-47ad-a662-b8effcccd19d" containerID="46cd5ead3cd60056da33216ada4d7ee7642ce5e1a3dd4be1eda1ff7f11c94090" exitCode=0 Nov 22 07:13:40 crc kubenswrapper[4929]: I1122 07:13:40.561883 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ddw7" event={"ID":"397a142d-10a3-47ad-a662-b8effcccd19d","Type":"ContainerDied","Data":"46cd5ead3cd60056da33216ada4d7ee7642ce5e1a3dd4be1eda1ff7f11c94090"} Nov 22 07:13:40 crc kubenswrapper[4929]: I1122 07:13:40.566136 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" event={"ID":"6776df8d-1529-41a1-9474-d368b6631779","Type":"ContainerStarted","Data":"81faee2aa99235ca1861e169ed088fabdb0b08371ee9a383fcb53d870a22819a"} Nov 22 07:13:40 crc kubenswrapper[4929]: I1122 07:13:40.567499 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa","Type":"ContainerStarted","Data":"69b2a6ce983dd8500422f838ad5d07b787b2542a70af8312eda9a884b688e6e6"} Nov 22 07:13:40 crc kubenswrapper[4929]: I1122 07:13:40.568505 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tlqcg" event={"ID":"6e8c08c4-b36e-49ee-bdfe-003d398a267e","Type":"ContainerStarted","Data":"6db0c80e2f56e0abe718e4791b3c1d4aeb54cdc910bc925b577e203058a0874c"} Nov 22 07:13:40 crc kubenswrapper[4929]: I1122 07:13:40.568541 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tlqcg" event={"ID":"6e8c08c4-b36e-49ee-bdfe-003d398a267e","Type":"ContainerStarted","Data":"f632d41e546b7d200ee434d04bc14c734d39b38be85b450f3623b9ff5babafc5"} Nov 22 07:13:40 crc kubenswrapper[4929]: I1122 07:13:40.649032 4929 patch_prober.go:28] interesting pod/router-default-5444994796-vp5z4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 07:13:40 crc kubenswrapper[4929]: [-]has-synced failed: reason withheld Nov 22 07:13:40 crc kubenswrapper[4929]: [+]process-running ok Nov 22 07:13:40 crc kubenswrapper[4929]: healthz check failed Nov 22 07:13:40 crc kubenswrapper[4929]: I1122 07:13:40.649111 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vp5z4" 
podUID="5d850ad3-bdf5-479f-9ca7-da300763391c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 07:13:41 crc kubenswrapper[4929]: I1122 07:13:41.575819 4929 generic.go:334] "Generic (PLEG): container finished" podID="68254869-b4b2-4c18-980e-4c1ce48555fe" containerID="487380db37c4eebc32c8e03fddacf74cddc690decbc8388a14d884e603b223e8" exitCode=0 Nov 22 07:13:41 crc kubenswrapper[4929]: I1122 07:13:41.575913 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bjwv6" event={"ID":"68254869-b4b2-4c18-980e-4c1ce48555fe","Type":"ContainerDied","Data":"487380db37c4eebc32c8e03fddacf74cddc690decbc8388a14d884e603b223e8"} Nov 22 07:13:41 crc kubenswrapper[4929]: I1122 07:13:41.577631 4929 generic.go:334] "Generic (PLEG): container finished" podID="6e8c08c4-b36e-49ee-bdfe-003d398a267e" containerID="6db0c80e2f56e0abe718e4791b3c1d4aeb54cdc910bc925b577e203058a0874c" exitCode=0 Nov 22 07:13:41 crc kubenswrapper[4929]: I1122 07:13:41.578455 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tlqcg" event={"ID":"6e8c08c4-b36e-49ee-bdfe-003d398a267e","Type":"ContainerDied","Data":"6db0c80e2f56e0abe718e4791b3c1d4aeb54cdc910bc925b577e203058a0874c"} Nov 22 07:13:41 crc kubenswrapper[4929]: I1122 07:13:41.579146 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:41 crc kubenswrapper[4929]: I1122 07:13:41.619865 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" podStartSLOduration=114.619841853 podStartE2EDuration="1m54.619841853s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:13:41.614795858 +0000 UTC m=+158.724249861" watchObservedRunningTime="2025-11-22 07:13:41.619841853 +0000 UTC m=+158.729295866" Nov 22 07:13:41 crc kubenswrapper[4929]: I1122 07:13:41.648074 4929 patch_prober.go:28] interesting pod/router-default-5444994796-vp5z4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 07:13:41 crc kubenswrapper[4929]: [-]has-synced failed: reason withheld Nov 22 07:13:41 crc kubenswrapper[4929]: [+]process-running ok Nov 22 07:13:41 crc kubenswrapper[4929]: healthz check failed Nov 22 07:13:41 crc kubenswrapper[4929]: I1122 07:13:41.648134 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vp5z4" podUID="5d850ad3-bdf5-479f-9ca7-da300763391c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 07:13:41 crc kubenswrapper[4929]: I1122 07:13:41.850405 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 07:13:41 crc kubenswrapper[4929]: I1122 07:13:41.938894 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1b4c6879-fbf4-4589-aae5-3a6719cb42b6-kube-api-access\") pod \"1b4c6879-fbf4-4589-aae5-3a6719cb42b6\" (UID: \"1b4c6879-fbf4-4589-aae5-3a6719cb42b6\") " Nov 22 07:13:41 crc kubenswrapper[4929]: I1122 07:13:41.939001 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1b4c6879-fbf4-4589-aae5-3a6719cb42b6-kubelet-dir\") pod \"1b4c6879-fbf4-4589-aae5-3a6719cb42b6\" (UID: \"1b4c6879-fbf4-4589-aae5-3a6719cb42b6\") " Nov 22 07:13:41 crc kubenswrapper[4929]: I1122 07:13:41.939348 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1b4c6879-fbf4-4589-aae5-3a6719cb42b6-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "1b4c6879-fbf4-4589-aae5-3a6719cb42b6" (UID: "1b4c6879-fbf4-4589-aae5-3a6719cb42b6"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:13:41 crc kubenswrapper[4929]: I1122 07:13:41.945755 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b4c6879-fbf4-4589-aae5-3a6719cb42b6-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1b4c6879-fbf4-4589-aae5-3a6719cb42b6" (UID: "1b4c6879-fbf4-4589-aae5-3a6719cb42b6"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:13:42 crc kubenswrapper[4929]: I1122 07:13:42.040889 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1b4c6879-fbf4-4589-aae5-3a6719cb42b6-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 07:13:42 crc kubenswrapper[4929]: I1122 07:13:42.040929 4929 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1b4c6879-fbf4-4589-aae5-3a6719cb42b6-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 22 07:13:42 crc kubenswrapper[4929]: I1122 07:13:42.584393 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"1b4c6879-fbf4-4589-aae5-3a6719cb42b6","Type":"ContainerDied","Data":"0c5bb7be27ae3e4a9fe36920e4369e3240988c10b9ba8a04370ce98709016c18"} Nov 22 07:13:42 crc kubenswrapper[4929]: I1122 07:13:42.584425 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 07:13:42 crc kubenswrapper[4929]: I1122 07:13:42.584441 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0c5bb7be27ae3e4a9fe36920e4369e3240988c10b9ba8a04370ce98709016c18" Nov 22 07:13:42 crc kubenswrapper[4929]: I1122 07:13:42.587575 4929 generic.go:334] "Generic (PLEG): container finished" podID="eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa" containerID="69b2a6ce983dd8500422f838ad5d07b787b2542a70af8312eda9a884b688e6e6" exitCode=0 Nov 22 07:13:42 crc kubenswrapper[4929]: I1122 07:13:42.587746 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa","Type":"ContainerDied","Data":"69b2a6ce983dd8500422f838ad5d07b787b2542a70af8312eda9a884b688e6e6"} Nov 22 07:13:42 crc kubenswrapper[4929]: I1122 07:13:42.648186 4929 patch_prober.go:28] interesting pod/router-default-5444994796-vp5z4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 07:13:42 crc kubenswrapper[4929]: [-]has-synced failed: reason withheld Nov 22 07:13:42 crc kubenswrapper[4929]: [+]process-running ok Nov 22 07:13:42 crc kubenswrapper[4929]: healthz check failed Nov 22 07:13:42 crc kubenswrapper[4929]: I1122 07:13:42.648369 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vp5z4" podUID="5d850ad3-bdf5-479f-9ca7-da300763391c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 07:13:42 crc kubenswrapper[4929]: I1122 07:13:42.706274 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:42 crc kubenswrapper[4929]: I1122 07:13:42.710830 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-nnnk9" Nov 22 07:13:43 crc kubenswrapper[4929]: I1122 07:13:43.649978 4929 patch_prober.go:28] interesting pod/router-default-5444994796-vp5z4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 07:13:43 crc kubenswrapper[4929]: [-]has-synced failed: reason withheld Nov 22 07:13:43 crc kubenswrapper[4929]: [+]process-running ok Nov 22 07:13:43 crc kubenswrapper[4929]: healthz check failed Nov 22 07:13:43 crc kubenswrapper[4929]: I1122 07:13:43.650041 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vp5z4" podUID="5d850ad3-bdf5-479f-9ca7-da300763391c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 07:13:43 crc kubenswrapper[4929]: I1122 07:13:43.836679 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 07:13:43 crc kubenswrapper[4929]: I1122 07:13:43.961583 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-rwvvh" Nov 22 07:13:43 crc kubenswrapper[4929]: I1122 07:13:43.968175 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa-kubelet-dir\") pod \"eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa\" (UID: \"eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa\") " Nov 22 07:13:43 crc kubenswrapper[4929]: I1122 07:13:43.968309 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa-kube-api-access\") pod \"eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa\" (UID: \"eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa\") " Nov 22 07:13:43 crc kubenswrapper[4929]: I1122 07:13:43.969202 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa" (UID: "eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:13:43 crc kubenswrapper[4929]: I1122 07:13:43.973614 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa" (UID: "eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:13:44 crc kubenswrapper[4929]: I1122 07:13:44.069408 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 07:13:44 crc kubenswrapper[4929]: I1122 07:13:44.069448 4929 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 22 07:13:44 crc kubenswrapper[4929]: I1122 07:13:44.600414 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 07:13:44 crc kubenswrapper[4929]: I1122 07:13:44.601049 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa","Type":"ContainerDied","Data":"fbcaf610618b4b8a1444a2c6367b3e8af437e96938e130cc93e882a9b749e46a"} Nov 22 07:13:44 crc kubenswrapper[4929]: I1122 07:13:44.601140 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fbcaf610618b4b8a1444a2c6367b3e8af437e96938e130cc93e882a9b749e46a" Nov 22 07:13:44 crc kubenswrapper[4929]: I1122 07:13:44.649464 4929 patch_prober.go:28] interesting pod/router-default-5444994796-vp5z4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 07:13:44 crc kubenswrapper[4929]: [-]has-synced failed: reason withheld Nov 22 07:13:44 crc kubenswrapper[4929]: [+]process-running ok Nov 22 07:13:44 crc kubenswrapper[4929]: healthz check failed Nov 22 07:13:44 crc kubenswrapper[4929]: I1122 07:13:44.649535 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vp5z4" podUID="5d850ad3-bdf5-479f-9ca7-da300763391c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 07:13:45 crc kubenswrapper[4929]: I1122 07:13:45.648647 4929 patch_prober.go:28] interesting pod/router-default-5444994796-vp5z4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 07:13:45 crc kubenswrapper[4929]: [-]has-synced failed: reason withheld Nov 22 07:13:45 crc kubenswrapper[4929]: [+]process-running ok Nov 22 07:13:45 crc kubenswrapper[4929]: healthz check failed Nov 22 07:13:45 crc kubenswrapper[4929]: I1122 07:13:45.648707 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vp5z4" podUID="5d850ad3-bdf5-479f-9ca7-da300763391c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 07:13:46 crc kubenswrapper[4929]: I1122 07:13:46.647554 4929 patch_prober.go:28] interesting pod/router-default-5444994796-vp5z4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 07:13:46 crc kubenswrapper[4929]: [-]has-synced failed: reason withheld Nov 22 07:13:46 crc kubenswrapper[4929]: [+]process-running ok Nov 22 07:13:46 crc kubenswrapper[4929]: healthz check failed Nov 22 07:13:46 crc kubenswrapper[4929]: I1122 07:13:46.647926 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vp5z4" podUID="5d850ad3-bdf5-479f-9ca7-da300763391c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 07:13:47 crc kubenswrapper[4929]: I1122 07:13:47.647879 4929 patch_prober.go:28] interesting pod/router-default-5444994796-vp5z4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 07:13:47 crc kubenswrapper[4929]: [-]has-synced failed: reason withheld Nov 22 07:13:47 crc kubenswrapper[4929]: 
[+]process-running ok Nov 22 07:13:47 crc kubenswrapper[4929]: healthz check failed Nov 22 07:13:47 crc kubenswrapper[4929]: I1122 07:13:47.647935 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vp5z4" podUID="5d850ad3-bdf5-479f-9ca7-da300763391c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 07:13:47 crc kubenswrapper[4929]: I1122 07:13:47.809668 4929 patch_prober.go:28] interesting pod/console-f9d7485db-tqwhs container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.12:8443/health\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Nov 22 07:13:47 crc kubenswrapper[4929]: I1122 07:13:47.809750 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-tqwhs" podUID="de2ba535-d661-453d-b4cd-19c6e7628b0c" containerName="console" probeResult="failure" output="Get \"https://10.217.0.12:8443/health\": dial tcp 10.217.0.12:8443: connect: connection refused" Nov 22 07:13:48 crc kubenswrapper[4929]: I1122 07:13:48.468780 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 07:13:48 crc kubenswrapper[4929]: I1122 07:13:48.468838 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 07:13:48 crc kubenswrapper[4929]: I1122 07:13:48.468872 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 07:13:48 crc kubenswrapper[4929]: I1122 07:13:48.468890 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 07:13:48 crc kubenswrapper[4929]: I1122 07:13:48.633863 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:13:48 crc kubenswrapper[4929]: I1122 07:13:48.646417 4929 patch_prober.go:28] interesting pod/router-default-5444994796-vp5z4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 07:13:48 crc kubenswrapper[4929]: [+]has-synced ok Nov 22 07:13:48 crc kubenswrapper[4929]: [+]process-running ok Nov 22 07:13:48 crc kubenswrapper[4929]: healthz check failed Nov 22 07:13:48 crc kubenswrapper[4929]: I1122 07:13:48.646473 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-vp5z4" podUID="5d850ad3-bdf5-479f-9ca7-da300763391c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 07:13:49 crc kubenswrapper[4929]: I1122 07:13:49.648891 4929 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-vp5z4" Nov 22 07:13:49 crc kubenswrapper[4929]: I1122 07:13:49.652393 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-vp5z4" Nov 22 07:13:50 crc kubenswrapper[4929]: I1122 07:13:50.767300 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:13:50 crc kubenswrapper[4929]: I1122 07:13:50.767422 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:13:50 crc kubenswrapper[4929]: I1122 07:13:50.782509 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:13:50 crc kubenswrapper[4929]: I1122 07:13:50.783975 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:13:50 crc kubenswrapper[4929]: I1122 07:13:50.868391 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:13:50 crc kubenswrapper[4929]: I1122 07:13:50.868609 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:13:50 crc kubenswrapper[4929]: I1122 07:13:50.871600 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:13:50 crc kubenswrapper[4929]: I1122 07:13:50.872060 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: 
\"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:13:51 crc kubenswrapper[4929]: I1122 07:13:51.061252 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 07:13:51 crc kubenswrapper[4929]: I1122 07:13:51.071141 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 07:13:51 crc kubenswrapper[4929]: I1122 07:13:51.078176 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 07:13:57 crc kubenswrapper[4929]: I1122 07:13:57.857979 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-tqwhs" Nov 22 07:13:57 crc kubenswrapper[4929]: I1122 07:13:57.861783 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-tqwhs" Nov 22 07:13:58 crc kubenswrapper[4929]: I1122 07:13:58.469004 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 07:13:58 crc kubenswrapper[4929]: I1122 07:13:58.469395 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 07:13:58 crc kubenswrapper[4929]: I1122 07:13:58.469004 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 07:13:58 crc kubenswrapper[4929]: I1122 07:13:58.469467 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 07:13:58 crc kubenswrapper[4929]: I1122 07:13:58.469504 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-2zrch" Nov 22 07:13:58 crc kubenswrapper[4929]: I1122 07:13:58.470250 4929 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"798b17d9ed688325891fe3e1baf250da999bbc6b1a5376a3b8eb5e5c1f165984"} pod="openshift-console/downloads-7954f5f757-2zrch" containerMessage="Container download-server failed liveness probe, will be restarted" Nov 22 07:13:58 crc kubenswrapper[4929]: I1122 07:13:58.470332 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" 
containerID="cri-o://798b17d9ed688325891fe3e1baf250da999bbc6b1a5376a3b8eb5e5c1f165984" gracePeriod=2 Nov 22 07:13:58 crc kubenswrapper[4929]: I1122 07:13:58.471160 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 07:13:58 crc kubenswrapper[4929]: I1122 07:13:58.471186 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 07:13:58 crc kubenswrapper[4929]: I1122 07:13:58.969694 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" Nov 22 07:13:59 crc kubenswrapper[4929]: I1122 07:13:59.699749 4929 generic.go:334] "Generic (PLEG): container finished" podID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerID="798b17d9ed688325891fe3e1baf250da999bbc6b1a5376a3b8eb5e5c1f165984" exitCode=0 Nov 22 07:13:59 crc kubenswrapper[4929]: I1122 07:13:59.699787 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-2zrch" event={"ID":"54a8c9df-7649-46eb-afdd-054fa0c1f5eb","Type":"ContainerDied","Data":"798b17d9ed688325891fe3e1baf250da999bbc6b1a5376a3b8eb5e5c1f165984"} Nov 22 07:14:08 crc kubenswrapper[4929]: I1122 07:14:08.470105 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 07:14:08 crc kubenswrapper[4929]: I1122 07:14:08.471328 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 07:14:08 crc kubenswrapper[4929]: I1122 07:14:08.595818 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-m7pl4" Nov 22 07:14:10 crc kubenswrapper[4929]: I1122 07:14:10.173359 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs\") pod \"network-metrics-daemon-vmdgb\" (UID: \"42cb9248-6b5b-4970-8232-68883ec65710\") " pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:14:10 crc kubenswrapper[4929]: I1122 07:14:10.175460 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 22 07:14:10 crc kubenswrapper[4929]: I1122 07:14:10.191358 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/42cb9248-6b5b-4970-8232-68883ec65710-metrics-certs\") pod \"network-metrics-daemon-vmdgb\" (UID: \"42cb9248-6b5b-4970-8232-68883ec65710\") " pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:14:10 crc kubenswrapper[4929]: I1122 07:14:10.412687 4929 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 22 07:14:10 crc kubenswrapper[4929]: I1122 07:14:10.421075 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-vmdgb" Nov 22 07:14:18 crc kubenswrapper[4929]: I1122 07:14:18.468602 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 07:14:18 crc kubenswrapper[4929]: I1122 07:14:18.468908 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 07:14:18 crc kubenswrapper[4929]: I1122 07:14:18.594317 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:14:18 crc kubenswrapper[4929]: I1122 07:14:18.594372 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:14:25 crc kubenswrapper[4929]: E1122 07:14:25.989926 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 22 07:14:25 crc kubenswrapper[4929]: E1122 07:14:25.990644 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-csstg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-qntx2_openshift-marketplace(5ed8fd8e-4ded-4661-af4b-64c6e80cc613): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 07:14:25 crc kubenswrapper[4929]: E1122 07:14:25.991871 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-qntx2" podUID="5ed8fd8e-4ded-4661-af4b-64c6e80cc613" Nov 22 07:14:26 crc kubenswrapper[4929]: E1122 07:14:26.006370 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 22 07:14:26 crc kubenswrapper[4929]: E1122 07:14:26.006543 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dh2mw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-6gdjz_openshift-marketplace(3dd2a0c6-cab8-4398-b9ce-30fc74285bfd): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 07:14:26 crc kubenswrapper[4929]: E1122 07:14:26.007797 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-6gdjz" podUID="3dd2a0c6-cab8-4398-b9ce-30fc74285bfd" Nov 22 07:14:28 crc kubenswrapper[4929]: I1122 07:14:28.469367 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 07:14:28 crc kubenswrapper[4929]: I1122 07:14:28.469894 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 07:14:34 crc kubenswrapper[4929]: E1122 07:14:34.852894 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-6gdjz" podUID="3dd2a0c6-cab8-4398-b9ce-30fc74285bfd" Nov 22 07:14:38 crc kubenswrapper[4929]: I1122 07:14:38.469738 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 07:14:38 crc kubenswrapper[4929]: I1122 07:14:38.470122 4929 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 07:14:40 crc kubenswrapper[4929]: E1122 07:14:40.788359 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 22 07:14:40 crc kubenswrapper[4929]: E1122 07:14:40.788589 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9x8np,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-lfbtc_openshift-marketplace(3ddb775a-6361-405f-9ea9-63d22b9a4f79): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 07:14:40 crc kubenswrapper[4929]: E1122 07:14:40.790034 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-lfbtc" podUID="3ddb775a-6361-405f-9ea9-63d22b9a4f79" Nov 22 07:14:44 crc kubenswrapper[4929]: E1122 07:14:44.613070 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 22 07:14:44 crc kubenswrapper[4929]: E1122 07:14:44.613852 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4dzh6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-4ddw7_openshift-marketplace(397a142d-10a3-47ad-a662-b8effcccd19d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 07:14:44 crc kubenswrapper[4929]: E1122 07:14:44.615110 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-4ddw7" podUID="397a142d-10a3-47ad-a662-b8effcccd19d" Nov 22 07:14:44 crc kubenswrapper[4929]: E1122 07:14:44.634402 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 22 07:14:44 crc kubenswrapper[4929]: E1122 07:14:44.634598 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-sqz6b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-p6btf_openshift-marketplace(ce22e4ba-966e-46dc-a50f-cc3905f3da7c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 07:14:44 crc kubenswrapper[4929]: E1122 07:14:44.636161 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-p6btf" podUID="ce22e4ba-966e-46dc-a50f-cc3905f3da7c" Nov 22 07:14:44 crc kubenswrapper[4929]: E1122 07:14:44.642412 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 22 07:14:44 crc kubenswrapper[4929]: E1122 07:14:44.642581 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ftqds,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-94k7f_openshift-marketplace(ffefae1e-6d13-4fe0-bc00-d9d828551582): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 07:14:44 crc kubenswrapper[4929]: E1122 07:14:44.644347 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-94k7f" podUID="ffefae1e-6d13-4fe0-bc00-d9d828551582" Nov 22 07:14:48 crc kubenswrapper[4929]: I1122 07:14:48.469412 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 07:14:48 crc kubenswrapper[4929]: I1122 07:14:48.469516 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 07:14:48 crc kubenswrapper[4929]: I1122 07:14:48.594343 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:14:48 crc kubenswrapper[4929]: I1122 07:14:48.594421 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:14:58 crc kubenswrapper[4929]: I1122 07:14:58.468394 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch 
container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 07:14:58 crc kubenswrapper[4929]: I1122 07:14:58.469143 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 07:14:59 crc kubenswrapper[4929]: E1122 07:14:59.538867 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 22 07:14:59 crc kubenswrapper[4929]: E1122 07:14:59.539347 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lbkrt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-bjwv6_openshift-marketplace(68254869-b4b2-4c18-980e-4c1ce48555fe): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 07:14:59 crc kubenswrapper[4929]: E1122 07:14:59.542435 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-bjwv6" podUID="68254869-b4b2-4c18-980e-4c1ce48555fe" Nov 22 07:14:59 crc kubenswrapper[4929]: E1122 07:14:59.547997 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 22 07:14:59 crc kubenswrapper[4929]: E1122 07:14:59.548207 4929 
kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-94hdq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-tlqcg_openshift-marketplace(6e8c08c4-b36e-49ee-bdfe-003d398a267e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 07:14:59 crc kubenswrapper[4929]: E1122 07:14:59.550543 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-tlqcg" podUID="6e8c08c4-b36e-49ee-bdfe-003d398a267e" Nov 22 07:14:59 crc kubenswrapper[4929]: W1122 07:14:59.963768 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-b599ec77e2826d29fcdb001303aca07c35380319136872405db4aa75bd6a30e9 WatchSource:0}: Error finding container b599ec77e2826d29fcdb001303aca07c35380319136872405db4aa75bd6a30e9: Status 404 returned error can't find the container with id b599ec77e2826d29fcdb001303aca07c35380319136872405db4aa75bd6a30e9 Nov 22 07:14:59 crc kubenswrapper[4929]: W1122 07:14:59.968324 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod42cb9248_6b5b_4970_8232_68883ec65710.slice/crio-26897ac67d2b7282f58d5f509bc61b7f12c232357d3480452fe16dfb2144581d WatchSource:0}: Error finding container 26897ac67d2b7282f58d5f509bc61b7f12c232357d3480452fe16dfb2144581d: Status 404 returned error can't find the container with id 26897ac67d2b7282f58d5f509bc61b7f12c232357d3480452fe16dfb2144581d Nov 22 07:14:59 crc kubenswrapper[4929]: I1122 07:14:59.972203 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-vmdgb"] Nov 22 07:14:59 crc kubenswrapper[4929]: W1122 
07:14:59.981265 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-9192aef09edb6d5542342586812e5273011951742919534d1d0ca01d1b7cac3f WatchSource:0}: Error finding container 9192aef09edb6d5542342586812e5273011951742919534d1d0ca01d1b7cac3f: Status 404 returned error can't find the container with id 9192aef09edb6d5542342586812e5273011951742919534d1d0ca01d1b7cac3f Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.015615 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-2zrch" event={"ID":"54a8c9df-7649-46eb-afdd-054fa0c1f5eb","Type":"ContainerStarted","Data":"8683afff8fa71760aa5e5b842d90c5b5092ae3b6a2438af43e2cbd480b5af9d1"} Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.016891 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" event={"ID":"42cb9248-6b5b-4970-8232-68883ec65710","Type":"ContainerStarted","Data":"26897ac67d2b7282f58d5f509bc61b7f12c232357d3480452fe16dfb2144581d"} Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.018082 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"b599ec77e2826d29fcdb001303aca07c35380319136872405db4aa75bd6a30e9"} Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.019584 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"a79b8c0d4bd1ae29eccb056ca7ea8a89daaed7f0d10482dbe6bcc301afcf72cb"} Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.021320 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"9192aef09edb6d5542342586812e5273011951742919534d1d0ca01d1b7cac3f"} Nov 22 07:15:00 crc kubenswrapper[4929]: E1122 07:15:00.041843 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-tlqcg" podUID="6e8c08c4-b36e-49ee-bdfe-003d398a267e" Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.158829 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr"] Nov 22 07:15:00 crc kubenswrapper[4929]: E1122 07:15:00.159349 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b4c6879-fbf4-4589-aae5-3a6719cb42b6" containerName="pruner" Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.159363 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b4c6879-fbf4-4589-aae5-3a6719cb42b6" containerName="pruner" Nov 22 07:15:00 crc kubenswrapper[4929]: E1122 07:15:00.159377 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa" containerName="pruner" Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.159384 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa" containerName="pruner" Nov 22 07:15:00 crc kubenswrapper[4929]: E1122 
Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.159403 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="a95c39bb-f2ae-4a5c-897f-1ac3a476c436" containerName="collect-profiles"
Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.159523 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b4c6879-fbf4-4589-aae5-3a6719cb42b6" containerName="pruner"
Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.159543 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="a95c39bb-f2ae-4a5c-897f-1ac3a476c436" containerName="collect-profiles"
Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.159556 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="eea1e8a3-ccaa-4736-8dbb-e74280f5b5fa" containerName="pruner"
Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.159899 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr"]
Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.160000 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr"
Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.162400 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.166584 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.277343 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6e73fb56-aba3-457d-813f-2087e73a8ea3-secret-volume\") pod \"collect-profiles-29396595-57zrr\" (UID: \"6e73fb56-aba3-457d-813f-2087e73a8ea3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr"
Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.277405 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6e73fb56-aba3-457d-813f-2087e73a8ea3-config-volume\") pod \"collect-profiles-29396595-57zrr\" (UID: \"6e73fb56-aba3-457d-813f-2087e73a8ea3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr"
Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.277508 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgrc2\" (UniqueName: \"kubernetes.io/projected/6e73fb56-aba3-457d-813f-2087e73a8ea3-kube-api-access-lgrc2\") pod \"collect-profiles-29396595-57zrr\" (UID: \"6e73fb56-aba3-457d-813f-2087e73a8ea3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr"
Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.379317 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6e73fb56-aba3-457d-813f-2087e73a8ea3-config-volume\") pod \"collect-profiles-29396595-57zrr\" (UID: \"6e73fb56-aba3-457d-813f-2087e73a8ea3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr"
Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.379454 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgrc2\" (UniqueName: \"kubernetes.io/projected/6e73fb56-aba3-457d-813f-2087e73a8ea3-kube-api-access-lgrc2\") pod \"collect-profiles-29396595-57zrr\" (UID: \"6e73fb56-aba3-457d-813f-2087e73a8ea3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr"
Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.379520 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6e73fb56-aba3-457d-813f-2087e73a8ea3-secret-volume\") pod \"collect-profiles-29396595-57zrr\" (UID: \"6e73fb56-aba3-457d-813f-2087e73a8ea3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr"
Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.380530 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6e73fb56-aba3-457d-813f-2087e73a8ea3-config-volume\") pod \"collect-profiles-29396595-57zrr\" (UID: \"6e73fb56-aba3-457d-813f-2087e73a8ea3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr"
Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.385973 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6e73fb56-aba3-457d-813f-2087e73a8ea3-secret-volume\") pod \"collect-profiles-29396595-57zrr\" (UID: \"6e73fb56-aba3-457d-813f-2087e73a8ea3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr"
Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.397746 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgrc2\" (UniqueName: \"kubernetes.io/projected/6e73fb56-aba3-457d-813f-2087e73a8ea3-kube-api-access-lgrc2\") pod \"collect-profiles-29396595-57zrr\" (UID: \"6e73fb56-aba3-457d-813f-2087e73a8ea3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr"
Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.532072 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr"
Nov 22 07:15:00 crc kubenswrapper[4929]: I1122 07:15:00.713702 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr"]
Nov 22 07:15:01 crc kubenswrapper[4929]: I1122 07:15:01.035423 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"802e584ec8bd1ae650ee1a1834e1278ccacaa61c4be055bf7d48a739aee2cd87"}
Nov 22 07:15:01 crc kubenswrapper[4929]: I1122 07:15:01.036577 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"db59eb9005c83667e70efe2ce818cdee074727e4851771ddfb78554b7f4ad85c"}
Nov 22 07:15:01 crc kubenswrapper[4929]: I1122 07:15:01.038452 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"26fadd04b84b0a0a61d22d7a7dd0c3eeef192de70814daf2d9ec95c402a8a7d7"}
Nov 22 07:15:01 crc kubenswrapper[4929]: I1122 07:15:01.039424 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr" event={"ID":"6e73fb56-aba3-457d-813f-2087e73a8ea3","Type":"ContainerStarted","Data":"d728eb37e69ee942ca395c1981e6d0df2e3a746928762363ef6ed59c0c6f1f8d"}
Nov 22 07:15:01 crc kubenswrapper[4929]: I1122 07:15:01.039957 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-2zrch"
Nov 22 07:15:01 crc kubenswrapper[4929]: I1122 07:15:01.040281 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body=
Nov 22 07:15:01 crc kubenswrapper[4929]: I1122 07:15:01.040324 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused"
Nov 22 07:15:02 crc kubenswrapper[4929]: I1122 07:15:02.044476 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body=
Nov 22 07:15:02 crc kubenswrapper[4929]: I1122 07:15:02.045136 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused"
Nov 22 07:15:05 crc kubenswrapper[4929]: I1122 07:15:05.071441 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" event={"ID":"42cb9248-6b5b-4970-8232-68883ec65710","Type":"ContainerStarted","Data":"8f84afa0faa6f18f8b71e12cc7e2da5fb6afc4b435227521060d661d2a149300"}
Nov 22 07:15:05 crc kubenswrapper[4929]: I1122 07:15:05.073129 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr" event={"ID":"6e73fb56-aba3-457d-813f-2087e73a8ea3","Type":"ContainerStarted","Data":"8843328384b040163cb90d2dd7fc8fea147e7308e41aa125aa370f97494c059e"}
Nov 22 07:15:06 crc kubenswrapper[4929]: I1122 07:15:06.094000 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr" podStartSLOduration=6.09398423 podStartE2EDuration="6.09398423s" podCreationTimestamp="2025-11-22 07:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:15:06.092610786 +0000 UTC m=+243.202064819" watchObservedRunningTime="2025-11-22 07:15:06.09398423 +0000 UTC m=+243.203438243"
Nov 22 07:15:07 crc kubenswrapper[4929]: I1122 07:15:07.085532 4929 generic.go:334] "Generic (PLEG): container finished" podID="6e73fb56-aba3-457d-813f-2087e73a8ea3" containerID="8843328384b040163cb90d2dd7fc8fea147e7308e41aa125aa370f97494c059e" exitCode=0
Nov 22 07:15:07 crc kubenswrapper[4929]: I1122 07:15:07.085889 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr" event={"ID":"6e73fb56-aba3-457d-813f-2087e73a8ea3","Type":"ContainerDied","Data":"8843328384b040163cb90d2dd7fc8fea147e7308e41aa125aa370f97494c059e"}
Nov 22 07:15:07 crc kubenswrapper[4929]: I1122 07:15:07.087798 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-vmdgb" event={"ID":"42cb9248-6b5b-4970-8232-68883ec65710","Type":"ContainerStarted","Data":"928c116e765c4d146a4e7cd556f599eaa7f7f8223a654b9609c8f9c7abf26911"}
Nov 22 07:15:07 crc kubenswrapper[4929]: I1122 07:15:07.121636 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-vmdgb" podStartSLOduration=200.121618448 podStartE2EDuration="3m20.121618448s" podCreationTimestamp="2025-11-22 07:11:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:15:07.119237568 +0000 UTC m=+244.228691601" watchObservedRunningTime="2025-11-22 07:15:07.121618448 +0000 UTC m=+244.231072461"
Nov 22 07:15:08 crc kubenswrapper[4929]: I1122 07:15:08.514826 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body=
Nov 22 07:15:08 crc kubenswrapper[4929]: I1122 07:15:08.514913 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused"
Nov 22 07:15:08 crc kubenswrapper[4929]: I1122 07:15:08.515271 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body=
Nov 22 07:15:08 crc kubenswrapper[4929]: I1122 07:15:08.515337 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused"
Nov 22 07:15:11 crc kubenswrapper[4929]: I1122 07:15:11.062257 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:15:17 crc kubenswrapper[4929]: I1122 07:15:17.088610 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr"
Nov 22 07:15:17 crc kubenswrapper[4929]: I1122 07:15:17.159840 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6e73fb56-aba3-457d-813f-2087e73a8ea3-config-volume\") pod \"6e73fb56-aba3-457d-813f-2087e73a8ea3\" (UID: \"6e73fb56-aba3-457d-813f-2087e73a8ea3\") "
Nov 22 07:15:17 crc kubenswrapper[4929]: I1122 07:15:17.159973 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lgrc2\" (UniqueName: \"kubernetes.io/projected/6e73fb56-aba3-457d-813f-2087e73a8ea3-kube-api-access-lgrc2\") pod \"6e73fb56-aba3-457d-813f-2087e73a8ea3\" (UID: \"6e73fb56-aba3-457d-813f-2087e73a8ea3\") "
Nov 22 07:15:17 crc kubenswrapper[4929]: I1122 07:15:17.159999 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6e73fb56-aba3-457d-813f-2087e73a8ea3-secret-volume\") pod \"6e73fb56-aba3-457d-813f-2087e73a8ea3\" (UID: \"6e73fb56-aba3-457d-813f-2087e73a8ea3\") "
Nov 22 07:15:17 crc kubenswrapper[4929]: I1122 07:15:17.160944 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e73fb56-aba3-457d-813f-2087e73a8ea3-config-volume" (OuterVolumeSpecName: "config-volume") pod "6e73fb56-aba3-457d-813f-2087e73a8ea3" (UID: "6e73fb56-aba3-457d-813f-2087e73a8ea3"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:15:17 crc kubenswrapper[4929]: I1122 07:15:17.166426 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e73fb56-aba3-457d-813f-2087e73a8ea3-kube-api-access-lgrc2" (OuterVolumeSpecName: "kube-api-access-lgrc2") pod "6e73fb56-aba3-457d-813f-2087e73a8ea3" (UID: "6e73fb56-aba3-457d-813f-2087e73a8ea3"). InnerVolumeSpecName "kube-api-access-lgrc2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:15:17 crc kubenswrapper[4929]: I1122 07:15:17.166611 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr" event={"ID":"6e73fb56-aba3-457d-813f-2087e73a8ea3","Type":"ContainerDied","Data":"d728eb37e69ee942ca395c1981e6d0df2e3a746928762363ef6ed59c0c6f1f8d"}
Nov 22 07:15:17 crc kubenswrapper[4929]: I1122 07:15:17.166662 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d728eb37e69ee942ca395c1981e6d0df2e3a746928762363ef6ed59c0c6f1f8d"
Nov 22 07:15:17 crc kubenswrapper[4929]: I1122 07:15:17.166725 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr"
Nov 22 07:15:17 crc kubenswrapper[4929]: I1122 07:15:17.169057 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e73fb56-aba3-457d-813f-2087e73a8ea3-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "6e73fb56-aba3-457d-813f-2087e73a8ea3" (UID: "6e73fb56-aba3-457d-813f-2087e73a8ea3"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:15:17 crc kubenswrapper[4929]: I1122 07:15:17.261502 4929 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/6e73fb56-aba3-457d-813f-2087e73a8ea3-config-volume\") on node \"crc\" DevicePath \"\""
Nov 22 07:15:17 crc kubenswrapper[4929]: I1122 07:15:17.261540 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lgrc2\" (UniqueName: \"kubernetes.io/projected/6e73fb56-aba3-457d-813f-2087e73a8ea3-kube-api-access-lgrc2\") on node \"crc\" DevicePath \"\""
Nov 22 07:15:17 crc kubenswrapper[4929]: I1122 07:15:17.261553 4929 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/6e73fb56-aba3-457d-813f-2087e73a8ea3-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 22 07:15:18 crc kubenswrapper[4929]: I1122 07:15:18.468816 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body=
Nov 22 07:15:18 crc kubenswrapper[4929]: I1122 07:15:18.468944 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused"
Nov 22 07:15:18 crc kubenswrapper[4929]: I1122 07:15:18.469505 4929 patch_prober.go:28] interesting pod/downloads-7954f5f757-2zrch container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body=
Nov 22 07:15:18 crc kubenswrapper[4929]: I1122 07:15:18.469578 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-2zrch" podUID="54a8c9df-7649-46eb-afdd-054fa0c1f5eb" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused"
Nov 22 07:15:18 crc kubenswrapper[4929]: I1122 07:15:18.594558 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 07:15:18 crc kubenswrapper[4929]: I1122 07:15:18.594630 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 07:15:18 crc kubenswrapper[4929]: I1122 07:15:18.594681 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dssfx"
Nov 22 07:15:18 crc kubenswrapper[4929]: I1122 07:15:18.595394 4929 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e"} pod="openshift-machine-config-operator/machine-config-daemon-dssfx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 22 07:15:18 crc kubenswrapper[4929]: I1122 07:15:18.595451 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" containerID="cri-o://0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e" gracePeriod=600
Nov 22 07:15:21 crc kubenswrapper[4929]: I1122 07:15:21.190313 4929 generic.go:334] "Generic (PLEG): container finished" podID="470531cb-120c-48d9-80e1-adf074cf3055" containerID="0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e" exitCode=0
Nov 22 07:15:21 crc kubenswrapper[4929]: I1122 07:15:21.190672 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerDied","Data":"0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e"}
Nov 22 07:15:28 crc kubenswrapper[4929]: I1122 07:15:28.481707 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-2zrch"
Nov 22 07:15:31 crc kubenswrapper[4929]: I1122 07:15:31.346412 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 07:15:43 crc kubenswrapper[4929]: I1122 07:15:43.387439 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"a46bacf3884d0eb2dd611997e418aa6922cdfa160410640c54f9f6eaaa0821fb"}
Nov 22 07:15:44 crc kubenswrapper[4929]: I1122 07:15:44.396383 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qntx2" event={"ID":"5ed8fd8e-4ded-4661-af4b-64c6e80cc613","Type":"ContainerStarted","Data":"84d6778cb93f88f4ebe3f0d63ff8dc8b4870fc448fbbf2be034159e551f887ae"}
Nov 22 07:15:44 crc kubenswrapper[4929]: I1122 07:15:44.398543 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gdjz" event={"ID":"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd","Type":"ContainerStarted","Data":"45304dde650a3185710803e03bef2556a023909efee959c0ff7b1a8cd3a36867"}
Nov 22 07:15:44 crc kubenswrapper[4929]: I1122 07:15:44.401205 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94k7f" event={"ID":"ffefae1e-6d13-4fe0-bc00-d9d828551582","Type":"ContainerStarted","Data":"6d603b7aa8ee36ad5d179463a75b534c83e3fac7ed352778f64447c13b4d0022"}
Nov 22 07:15:44 crc kubenswrapper[4929]: I1122 07:15:44.403294 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bjwv6" event={"ID":"68254869-b4b2-4c18-980e-4c1ce48555fe","Type":"ContainerStarted","Data":"013e0ed0a33d4804fafcb47022a808a0b64a633239fe2bef432b5f78c92bd984"}
Nov 22 07:15:44 crc kubenswrapper[4929]: I1122 07:15:44.405298 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lfbtc" event={"ID":"3ddb775a-6361-405f-9ea9-63d22b9a4f79","Type":"ContainerStarted","Data":"5aaf789e0b3192275da606794682415e505cafeed73520f2500a56f5f9c337bf"}
Nov 22 07:15:44 crc kubenswrapper[4929]: I1122 07:15:44.407246 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ddw7" event={"ID":"397a142d-10a3-47ad-a662-b8effcccd19d","Type":"ContainerStarted","Data":"5d44d91e5de779607db10306a1ab49cfd97189097bab2e95ab823394967ffec0"}
Nov 22 07:15:44 crc kubenswrapper[4929]: I1122 07:15:44.410183 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tlqcg" event={"ID":"6e8c08c4-b36e-49ee-bdfe-003d398a267e","Type":"ContainerStarted","Data":"c1d2ab4ecbecca89bdabb5a071b3b388105aae750ba91c4e0981a53c00ce44ed"}
Nov 22 07:15:44 crc kubenswrapper[4929]: I1122 07:15:44.412371 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p6btf" event={"ID":"ce22e4ba-966e-46dc-a50f-cc3905f3da7c","Type":"ContainerStarted","Data":"b5d22fb4a772845dbe34dbf333a51715780867eda5fc8e5186ec403879a20102"}
Nov 22 07:15:46 crc kubenswrapper[4929]: E1122 07:15:46.369239 4929 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ed8fd8e_4ded_4661_af4b_64c6e80cc613.slice/crio-84d6778cb93f88f4ebe3f0d63ff8dc8b4870fc448fbbf2be034159e551f887ae.scope\": RecentStats: unable to find data in memory cache]"
Nov 22 07:15:48 crc kubenswrapper[4929]: I1122 07:15:48.436573 4929 generic.go:334] "Generic (PLEG): container finished" podID="ce22e4ba-966e-46dc-a50f-cc3905f3da7c" containerID="b5d22fb4a772845dbe34dbf333a51715780867eda5fc8e5186ec403879a20102" exitCode=0
Nov 22 07:15:48 crc kubenswrapper[4929]: I1122 07:15:48.436721 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p6btf" event={"ID":"ce22e4ba-966e-46dc-a50f-cc3905f3da7c","Type":"ContainerDied","Data":"b5d22fb4a772845dbe34dbf333a51715780867eda5fc8e5186ec403879a20102"}
Nov 22 07:15:48 crc kubenswrapper[4929]: I1122 07:15:48.440964 4929 generic.go:334] "Generic (PLEG): container finished" podID="5ed8fd8e-4ded-4661-af4b-64c6e80cc613" containerID="84d6778cb93f88f4ebe3f0d63ff8dc8b4870fc448fbbf2be034159e551f887ae" exitCode=0
Nov 22 07:15:48 crc kubenswrapper[4929]: I1122 07:15:48.441048 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qntx2" event={"ID":"5ed8fd8e-4ded-4661-af4b-64c6e80cc613","Type":"ContainerDied","Data":"84d6778cb93f88f4ebe3f0d63ff8dc8b4870fc448fbbf2be034159e551f887ae"}
Nov 22 07:15:48 crc kubenswrapper[4929]: I1122 07:15:48.444349 4929 generic.go:334] "Generic (PLEG): container finished" podID="3dd2a0c6-cab8-4398-b9ce-30fc74285bfd" containerID="45304dde650a3185710803e03bef2556a023909efee959c0ff7b1a8cd3a36867" exitCode=0
Nov 22 07:15:48 crc kubenswrapper[4929]: I1122 07:15:48.444807 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gdjz" event={"ID":"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd","Type":"ContainerDied","Data":"45304dde650a3185710803e03bef2556a023909efee959c0ff7b1a8cd3a36867"}
event={"ID":"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd","Type":"ContainerDied","Data":"45304dde650a3185710803e03bef2556a023909efee959c0ff7b1a8cd3a36867"} Nov 22 07:15:48 crc kubenswrapper[4929]: I1122 07:15:48.448233 4929 generic.go:334] "Generic (PLEG): container finished" podID="ffefae1e-6d13-4fe0-bc00-d9d828551582" containerID="6d603b7aa8ee36ad5d179463a75b534c83e3fac7ed352778f64447c13b4d0022" exitCode=0 Nov 22 07:15:48 crc kubenswrapper[4929]: I1122 07:15:48.448352 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94k7f" event={"ID":"ffefae1e-6d13-4fe0-bc00-d9d828551582","Type":"ContainerDied","Data":"6d603b7aa8ee36ad5d179463a75b534c83e3fac7ed352778f64447c13b4d0022"} Nov 22 07:15:48 crc kubenswrapper[4929]: I1122 07:15:48.450749 4929 generic.go:334] "Generic (PLEG): container finished" podID="68254869-b4b2-4c18-980e-4c1ce48555fe" containerID="013e0ed0a33d4804fafcb47022a808a0b64a633239fe2bef432b5f78c92bd984" exitCode=0 Nov 22 07:15:48 crc kubenswrapper[4929]: I1122 07:15:48.450845 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bjwv6" event={"ID":"68254869-b4b2-4c18-980e-4c1ce48555fe","Type":"ContainerDied","Data":"013e0ed0a33d4804fafcb47022a808a0b64a633239fe2bef432b5f78c92bd984"} Nov 22 07:15:48 crc kubenswrapper[4929]: I1122 07:15:48.453732 4929 generic.go:334] "Generic (PLEG): container finished" podID="3ddb775a-6361-405f-9ea9-63d22b9a4f79" containerID="5aaf789e0b3192275da606794682415e505cafeed73520f2500a56f5f9c337bf" exitCode=0 Nov 22 07:15:48 crc kubenswrapper[4929]: I1122 07:15:48.453838 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lfbtc" event={"ID":"3ddb775a-6361-405f-9ea9-63d22b9a4f79","Type":"ContainerDied","Data":"5aaf789e0b3192275da606794682415e505cafeed73520f2500a56f5f9c337bf"} Nov 22 07:15:48 crc kubenswrapper[4929]: I1122 07:15:48.462977 4929 generic.go:334] "Generic (PLEG): container finished" podID="397a142d-10a3-47ad-a662-b8effcccd19d" containerID="5d44d91e5de779607db10306a1ab49cfd97189097bab2e95ab823394967ffec0" exitCode=0 Nov 22 07:15:48 crc kubenswrapper[4929]: I1122 07:15:48.463130 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ddw7" event={"ID":"397a142d-10a3-47ad-a662-b8effcccd19d","Type":"ContainerDied","Data":"5d44d91e5de779607db10306a1ab49cfd97189097bab2e95ab823394967ffec0"} Nov 22 07:15:48 crc kubenswrapper[4929]: I1122 07:15:48.471558 4929 generic.go:334] "Generic (PLEG): container finished" podID="6e8c08c4-b36e-49ee-bdfe-003d398a267e" containerID="c1d2ab4ecbecca89bdabb5a071b3b388105aae750ba91c4e0981a53c00ce44ed" exitCode=0 Nov 22 07:15:48 crc kubenswrapper[4929]: I1122 07:15:48.471615 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tlqcg" event={"ID":"6e8c08c4-b36e-49ee-bdfe-003d398a267e","Type":"ContainerDied","Data":"c1d2ab4ecbecca89bdabb5a071b3b388105aae750ba91c4e0981a53c00ce44ed"} Nov 22 07:17:11 crc kubenswrapper[4929]: I1122 07:17:11.974849 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qntx2" event={"ID":"5ed8fd8e-4ded-4661-af4b-64c6e80cc613","Type":"ContainerStarted","Data":"84f8a2dc1679a7f3a650a05941b0dd175d2126042cb92dbb696c523f9572f3c9"} Nov 22 07:17:11 crc kubenswrapper[4929]: I1122 07:17:11.977492 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gdjz" 
event={"ID":"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd","Type":"ContainerStarted","Data":"47e807f3e5e6aa901c3b6158ce2635e04cb5c7491657465eda9313af9fd61e4c"} Nov 22 07:17:11 crc kubenswrapper[4929]: I1122 07:17:11.979488 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94k7f" event={"ID":"ffefae1e-6d13-4fe0-bc00-d9d828551582","Type":"ContainerStarted","Data":"052dc6b9f731183b523bdf53cb6027f4fad177bd6ab456f77cd610d79f81ed81"} Nov 22 07:17:11 crc kubenswrapper[4929]: I1122 07:17:11.981197 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bjwv6" event={"ID":"68254869-b4b2-4c18-980e-4c1ce48555fe","Type":"ContainerStarted","Data":"4d9f062ebba0fbc85d675b3eded67bef5160bb03e715389e4ad6b9e748325e26"} Nov 22 07:17:11 crc kubenswrapper[4929]: I1122 07:17:11.983233 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ddw7" event={"ID":"397a142d-10a3-47ad-a662-b8effcccd19d","Type":"ContainerStarted","Data":"7e670c1e1a819c53fe383961a81a2897ba788eb26d2f917f349c98588d28d8b0"} Nov 22 07:17:11 crc kubenswrapper[4929]: I1122 07:17:11.985439 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lfbtc" event={"ID":"3ddb775a-6361-405f-9ea9-63d22b9a4f79","Type":"ContainerStarted","Data":"9c882027981053d85159d60cf14506fb11dfbc2c9025d3c66ea563b5585f6f76"} Nov 22 07:17:11 crc kubenswrapper[4929]: I1122 07:17:11.989170 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tlqcg" event={"ID":"6e8c08c4-b36e-49ee-bdfe-003d398a267e","Type":"ContainerStarted","Data":"e31cf1b731c4e899af8438b0fc89ca5e28edf16b3fbf8b22dab7a36b0cd4fbe5"} Nov 22 07:17:11 crc kubenswrapper[4929]: I1122 07:17:11.995372 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p6btf" event={"ID":"ce22e4ba-966e-46dc-a50f-cc3905f3da7c","Type":"ContainerStarted","Data":"c2b5cc49c78dbcfe7c477d85bca85ac4f98aa0dc08c085141c1b79c9aa93ed2f"} Nov 22 07:17:11 crc kubenswrapper[4929]: I1122 07:17:11.997683 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qntx2" podStartSLOduration=13.286297858 podStartE2EDuration="3m35.997666698s" podCreationTimestamp="2025-11-22 07:13:36 +0000 UTC" firstStartedPulling="2025-11-22 07:13:39.563727606 +0000 UTC m=+156.673181629" lastFinishedPulling="2025-11-22 07:17:02.275096456 +0000 UTC m=+359.384550469" observedRunningTime="2025-11-22 07:17:11.991374724 +0000 UTC m=+369.100828737" watchObservedRunningTime="2025-11-22 07:17:11.997666698 +0000 UTC m=+369.107120711" Nov 22 07:17:12 crc kubenswrapper[4929]: I1122 07:17:12.010459 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4ddw7" podStartSLOduration=6.325948711 podStartE2EDuration="3m35.010429182s" podCreationTimestamp="2025-11-22 07:13:37 +0000 UTC" firstStartedPulling="2025-11-22 07:13:41.579475418 +0000 UTC m=+158.688929431" lastFinishedPulling="2025-11-22 07:17:10.263955889 +0000 UTC m=+367.373409902" observedRunningTime="2025-11-22 07:17:12.005810761 +0000 UTC m=+369.115264784" watchObservedRunningTime="2025-11-22 07:17:12.010429182 +0000 UTC m=+369.119883205" Nov 22 07:17:12 crc kubenswrapper[4929]: I1122 07:17:12.023437 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6gdjz" 
Nov 22 07:17:12 crc kubenswrapper[4929]: I1122 07:17:12.039396 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-94k7f" podStartSLOduration=15.211864133 podStartE2EDuration="3m37.039379618s" podCreationTimestamp="2025-11-22 07:13:35 +0000 UTC" firstStartedPulling="2025-11-22 07:13:39.562184138 +0000 UTC m=+156.671638151" lastFinishedPulling="2025-11-22 07:17:01.389699623 +0000 UTC m=+358.499153636" observedRunningTime="2025-11-22 07:17:12.039117051 +0000 UTC m=+369.148571084" watchObservedRunningTime="2025-11-22 07:17:12.039379618 +0000 UTC m=+369.148833631"
Nov 22 07:17:12 crc kubenswrapper[4929]: I1122 07:17:12.062794 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lfbtc" podStartSLOduration=5.359772025 podStartE2EDuration="3m36.062774799s" podCreationTimestamp="2025-11-22 07:13:36 +0000 UTC" firstStartedPulling="2025-11-22 07:13:39.562190698 +0000 UTC m=+156.671644711" lastFinishedPulling="2025-11-22 07:17:10.265193472 +0000 UTC m=+367.374647485" observedRunningTime="2025-11-22 07:17:12.061762523 +0000 UTC m=+369.171216546" watchObservedRunningTime="2025-11-22 07:17:12.062774799 +0000 UTC m=+369.172228832"
Nov 22 07:17:12 crc kubenswrapper[4929]: I1122 07:17:12.078835 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bjwv6" podStartSLOduration=5.404119949 podStartE2EDuration="3m33.078820518s" podCreationTimestamp="2025-11-22 07:13:39 +0000 UTC" firstStartedPulling="2025-11-22 07:13:42.59038714 +0000 UTC m=+159.699841153" lastFinishedPulling="2025-11-22 07:17:10.265087709 +0000 UTC m=+367.374541722" observedRunningTime="2025-11-22 07:17:12.07849784 +0000 UTC m=+369.187951853" watchObservedRunningTime="2025-11-22 07:17:12.078820518 +0000 UTC m=+369.188274531"
Nov 22 07:17:12 crc kubenswrapper[4929]: I1122 07:17:12.098242 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tlqcg" podStartSLOduration=5.413957959 podStartE2EDuration="3m34.098221475s" podCreationTimestamp="2025-11-22 07:13:38 +0000 UTC" firstStartedPulling="2025-11-22 07:13:41.579710824 +0000 UTC m=+158.689164837" lastFinishedPulling="2025-11-22 07:17:10.26397434 +0000 UTC m=+367.373428353" observedRunningTime="2025-11-22 07:17:12.094643322 +0000 UTC m=+369.204097335" watchObservedRunningTime="2025-11-22 07:17:12.098221475 +0000 UTC m=+369.207675488"
Nov 22 07:17:12 crc kubenswrapper[4929]: I1122 07:17:12.119308 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-p6btf" podStartSLOduration=18.989308959 podStartE2EDuration="3m34.119289616s" podCreationTimestamp="2025-11-22 07:13:38 +0000 UTC" firstStartedPulling="2025-11-22 07:13:40.544640991 +0000 UTC m=+157.654095004" lastFinishedPulling="2025-11-22 07:16:55.674621638 +0000 UTC m=+352.784075661" observedRunningTime="2025-11-22 07:17:12.116857992 +0000 UTC m=+369.226312025" watchObservedRunningTime="2025-11-22 07:17:12.119289616 +0000 UTC m=+369.228743629"
Nov 22 07:17:16 crc kubenswrapper[4929]: I1122 07:17:16.303309 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-94k7f"
Nov 22 07:17:16 crc kubenswrapper[4929]: I1122 07:17:16.303918 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-94k7f"
Nov 22 07:17:16 crc kubenswrapper[4929]: I1122 07:17:16.440725 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6gdjz"
Nov 22 07:17:16 crc kubenswrapper[4929]: I1122 07:17:16.441160 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6gdjz"
Nov 22 07:17:16 crc kubenswrapper[4929]: I1122 07:17:16.752525 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6gdjz"
Nov 22 07:17:16 crc kubenswrapper[4929]: I1122 07:17:16.753526 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-94k7f"
Nov 22 07:17:16 crc kubenswrapper[4929]: I1122 07:17:16.764407 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lfbtc"
Nov 22 07:17:16 crc kubenswrapper[4929]: I1122 07:17:16.765115 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lfbtc"
Nov 22 07:17:16 crc kubenswrapper[4929]: I1122 07:17:16.805694 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lfbtc"
Nov 22 07:17:16 crc kubenswrapper[4929]: I1122 07:17:16.811609 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qntx2"
Nov 22 07:17:16 crc kubenswrapper[4929]: I1122 07:17:16.818459 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qntx2"
Nov 22 07:17:16 crc kubenswrapper[4929]: I1122 07:17:16.852682 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qntx2"
Nov 22 07:17:17 crc kubenswrapper[4929]: I1122 07:17:17.061227 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6gdjz"
Nov 22 07:17:17 crc kubenswrapper[4929]: I1122 07:17:17.065404 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qntx2"
Nov 22 07:17:17 crc kubenswrapper[4929]: I1122 07:17:17.072167 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lfbtc"
Nov 22 07:17:17 crc kubenswrapper[4929]: I1122 07:17:17.073849 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-94k7f"
Nov 22 07:17:17 crc kubenswrapper[4929]: I1122 07:17:17.604972 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lfbtc"]
Nov 22 07:17:18 crc kubenswrapper[4929]: I1122 07:17:18.285479 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4ddw7"
Nov 22 07:17:18 crc kubenswrapper[4929]: I1122 07:17:18.285877 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4ddw7"
Nov 22 07:17:18 crc kubenswrapper[4929]: I1122 07:17:18.330591 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4ddw7"
Nov 22 07:17:18 crc kubenswrapper[4929]: I1122 07:17:18.667826 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-p6btf"
Nov 22 07:17:18 crc kubenswrapper[4929]: I1122 07:17:18.667878 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-p6btf"
Nov 22 07:17:18 crc kubenswrapper[4929]: I1122 07:17:18.715531 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-p6btf"
Nov 22 07:17:19 crc kubenswrapper[4929]: I1122 07:17:19.005582 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qntx2"]
Nov 22 07:17:19 crc kubenswrapper[4929]: I1122 07:17:19.030589 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lfbtc" podUID="3ddb775a-6361-405f-9ea9-63d22b9a4f79" containerName="registry-server" containerID="cri-o://9c882027981053d85159d60cf14506fb11dfbc2c9025d3c66ea563b5585f6f76" gracePeriod=2
Nov 22 07:17:19 crc kubenswrapper[4929]: I1122 07:17:19.074712 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-p6btf"
Nov 22 07:17:19 crc kubenswrapper[4929]: I1122 07:17:19.077618 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4ddw7"
Nov 22 07:17:19 crc kubenswrapper[4929]: I1122 07:17:19.272333 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tlqcg"
Nov 22 07:17:19 crc kubenswrapper[4929]: I1122 07:17:19.272577 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tlqcg"
Nov 22 07:17:19 crc kubenswrapper[4929]: I1122 07:17:19.323873 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tlqcg"
Nov 22 07:17:19 crc kubenswrapper[4929]: I1122 07:17:19.712174 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bjwv6"
Nov 22 07:17:19 crc kubenswrapper[4929]: I1122 07:17:19.712730 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bjwv6"
Nov 22 07:17:19 crc kubenswrapper[4929]: I1122 07:17:19.753272 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bjwv6"
Nov 22 07:17:19 crc kubenswrapper[4929]: I1122 07:17:19.931718 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lfbtc"
Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.024623 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-mp7vd"]
Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.046899 4929 generic.go:334] "Generic (PLEG): container finished" podID="3ddb775a-6361-405f-9ea9-63d22b9a4f79" containerID="9c882027981053d85159d60cf14506fb11dfbc2c9025d3c66ea563b5585f6f76" exitCode=0
Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.047917 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lfbtc"
Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.048356 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lfbtc" event={"ID":"3ddb775a-6361-405f-9ea9-63d22b9a4f79","Type":"ContainerDied","Data":"9c882027981053d85159d60cf14506fb11dfbc2c9025d3c66ea563b5585f6f76"}
Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.048387 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lfbtc" event={"ID":"3ddb775a-6361-405f-9ea9-63d22b9a4f79","Type":"ContainerDied","Data":"01cb4a31e50292f487df77109fc4247fa2e4d4d661fc3c7abe168d8c3ac78829"}
Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.048407 4929 scope.go:117] "RemoveContainer" containerID="9c882027981053d85159d60cf14506fb11dfbc2c9025d3c66ea563b5585f6f76"
Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.049520 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qntx2" podUID="5ed8fd8e-4ded-4661-af4b-64c6e80cc613" containerName="registry-server" containerID="cri-o://84f8a2dc1679a7f3a650a05941b0dd175d2126042cb92dbb696c523f9572f3c9" gracePeriod=2
Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.084360 4929 scope.go:117] "RemoveContainer" containerID="5aaf789e0b3192275da606794682415e505cafeed73520f2500a56f5f9c337bf"
Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.092874 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ddb775a-6361-405f-9ea9-63d22b9a4f79-utilities\") pod \"3ddb775a-6361-405f-9ea9-63d22b9a4f79\" (UID: \"3ddb775a-6361-405f-9ea9-63d22b9a4f79\") "
Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.092986 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ddb775a-6361-405f-9ea9-63d22b9a4f79-catalog-content\") pod \"3ddb775a-6361-405f-9ea9-63d22b9a4f79\" (UID: \"3ddb775a-6361-405f-9ea9-63d22b9a4f79\") "
Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.093058 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9x8np\" (UniqueName: \"kubernetes.io/projected/3ddb775a-6361-405f-9ea9-63d22b9a4f79-kube-api-access-9x8np\") pod \"3ddb775a-6361-405f-9ea9-63d22b9a4f79\" (UID: \"3ddb775a-6361-405f-9ea9-63d22b9a4f79\") "
Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.093775 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ddb775a-6361-405f-9ea9-63d22b9a4f79-utilities" (OuterVolumeSpecName: "utilities") pod "3ddb775a-6361-405f-9ea9-63d22b9a4f79" (UID: "3ddb775a-6361-405f-9ea9-63d22b9a4f79"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.101970 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ddb775a-6361-405f-9ea9-63d22b9a4f79-kube-api-access-9x8np" (OuterVolumeSpecName: "kube-api-access-9x8np") pod "3ddb775a-6361-405f-9ea9-63d22b9a4f79" (UID: "3ddb775a-6361-405f-9ea9-63d22b9a4f79"). InnerVolumeSpecName "kube-api-access-9x8np". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.129158 4929 scope.go:117] "RemoveContainer" containerID="da6fab27830d7b68ede35961a7db7e9fdac67bb297894daf3b04d21e1ce0c311" Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.145619 4929 scope.go:117] "RemoveContainer" containerID="9c882027981053d85159d60cf14506fb11dfbc2c9025d3c66ea563b5585f6f76" Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.145687 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tlqcg" Nov 22 07:17:20 crc kubenswrapper[4929]: E1122 07:17:20.146535 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c882027981053d85159d60cf14506fb11dfbc2c9025d3c66ea563b5585f6f76\": container with ID starting with 9c882027981053d85159d60cf14506fb11dfbc2c9025d3c66ea563b5585f6f76 not found: ID does not exist" containerID="9c882027981053d85159d60cf14506fb11dfbc2c9025d3c66ea563b5585f6f76" Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.146568 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c882027981053d85159d60cf14506fb11dfbc2c9025d3c66ea563b5585f6f76"} err="failed to get container status \"9c882027981053d85159d60cf14506fb11dfbc2c9025d3c66ea563b5585f6f76\": rpc error: code = NotFound desc = could not find container \"9c882027981053d85159d60cf14506fb11dfbc2c9025d3c66ea563b5585f6f76\": container with ID starting with 9c882027981053d85159d60cf14506fb11dfbc2c9025d3c66ea563b5585f6f76 not found: ID does not exist" Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.146589 4929 scope.go:117] "RemoveContainer" containerID="5aaf789e0b3192275da606794682415e505cafeed73520f2500a56f5f9c337bf" Nov 22 07:17:20 crc kubenswrapper[4929]: E1122 07:17:20.146893 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5aaf789e0b3192275da606794682415e505cafeed73520f2500a56f5f9c337bf\": container with ID starting with 5aaf789e0b3192275da606794682415e505cafeed73520f2500a56f5f9c337bf not found: ID does not exist" containerID="5aaf789e0b3192275da606794682415e505cafeed73520f2500a56f5f9c337bf" Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.146919 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5aaf789e0b3192275da606794682415e505cafeed73520f2500a56f5f9c337bf"} err="failed to get container status \"5aaf789e0b3192275da606794682415e505cafeed73520f2500a56f5f9c337bf\": rpc error: code = NotFound desc = could not find container \"5aaf789e0b3192275da606794682415e505cafeed73520f2500a56f5f9c337bf\": container with ID starting with 5aaf789e0b3192275da606794682415e505cafeed73520f2500a56f5f9c337bf not found: ID does not exist" Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.146943 4929 scope.go:117] "RemoveContainer" containerID="da6fab27830d7b68ede35961a7db7e9fdac67bb297894daf3b04d21e1ce0c311" Nov 22 07:17:20 
crc kubenswrapper[4929]: E1122 07:17:20.147243 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da6fab27830d7b68ede35961a7db7e9fdac67bb297894daf3b04d21e1ce0c311\": container with ID starting with da6fab27830d7b68ede35961a7db7e9fdac67bb297894daf3b04d21e1ce0c311 not found: ID does not exist" containerID="da6fab27830d7b68ede35961a7db7e9fdac67bb297894daf3b04d21e1ce0c311" Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.147266 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da6fab27830d7b68ede35961a7db7e9fdac67bb297894daf3b04d21e1ce0c311"} err="failed to get container status \"da6fab27830d7b68ede35961a7db7e9fdac67bb297894daf3b04d21e1ce0c311\": rpc error: code = NotFound desc = could not find container \"da6fab27830d7b68ede35961a7db7e9fdac67bb297894daf3b04d21e1ce0c311\": container with ID starting with da6fab27830d7b68ede35961a7db7e9fdac67bb297894daf3b04d21e1ce0c311 not found: ID does not exist" Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.147734 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bjwv6" Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.148034 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ddb775a-6361-405f-9ea9-63d22b9a4f79-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3ddb775a-6361-405f-9ea9-63d22b9a4f79" (UID: "3ddb775a-6361-405f-9ea9-63d22b9a4f79"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.195782 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ddb775a-6361-405f-9ea9-63d22b9a4f79-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.196015 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ddb775a-6361-405f-9ea9-63d22b9a4f79-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.196087 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9x8np\" (UniqueName: \"kubernetes.io/projected/3ddb775a-6361-405f-9ea9-63d22b9a4f79-kube-api-access-9x8np\") on node \"crc\" DevicePath \"\"" Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.375642 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lfbtc"] Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.378893 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lfbtc"] Nov 22 07:17:20 crc kubenswrapper[4929]: I1122 07:17:20.918699 4929 util.go:48] "No ready sandbox for pod can be found. 
Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.006918 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ed8fd8e-4ded-4661-af4b-64c6e80cc613-utilities\") pod \"5ed8fd8e-4ded-4661-af4b-64c6e80cc613\" (UID: \"5ed8fd8e-4ded-4661-af4b-64c6e80cc613\") "
Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.006956 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-csstg\" (UniqueName: \"kubernetes.io/projected/5ed8fd8e-4ded-4661-af4b-64c6e80cc613-kube-api-access-csstg\") pod \"5ed8fd8e-4ded-4661-af4b-64c6e80cc613\" (UID: \"5ed8fd8e-4ded-4661-af4b-64c6e80cc613\") "
Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.007039 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ed8fd8e-4ded-4661-af4b-64c6e80cc613-catalog-content\") pod \"5ed8fd8e-4ded-4661-af4b-64c6e80cc613\" (UID: \"5ed8fd8e-4ded-4661-af4b-64c6e80cc613\") "
Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.008736 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ed8fd8e-4ded-4661-af4b-64c6e80cc613-utilities" (OuterVolumeSpecName: "utilities") pod "5ed8fd8e-4ded-4661-af4b-64c6e80cc613" (UID: "5ed8fd8e-4ded-4661-af4b-64c6e80cc613"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.011420 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ed8fd8e-4ded-4661-af4b-64c6e80cc613-kube-api-access-csstg" (OuterVolumeSpecName: "kube-api-access-csstg") pod "5ed8fd8e-4ded-4661-af4b-64c6e80cc613" (UID: "5ed8fd8e-4ded-4661-af4b-64c6e80cc613"). InnerVolumeSpecName "kube-api-access-csstg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.056539 4929 generic.go:334] "Generic (PLEG): container finished" podID="5ed8fd8e-4ded-4661-af4b-64c6e80cc613" containerID="84f8a2dc1679a7f3a650a05941b0dd175d2126042cb92dbb696c523f9572f3c9" exitCode=0
Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.056749 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qntx2" event={"ID":"5ed8fd8e-4ded-4661-af4b-64c6e80cc613","Type":"ContainerDied","Data":"84f8a2dc1679a7f3a650a05941b0dd175d2126042cb92dbb696c523f9572f3c9"}
Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.056842 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qntx2" event={"ID":"5ed8fd8e-4ded-4661-af4b-64c6e80cc613","Type":"ContainerDied","Data":"d3c47adaee973620e17c1ded56cf2a79d7e41ef98aa81d657f4590fda1cca055"}
Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.056883 4929 scope.go:117] "RemoveContainer" containerID="84f8a2dc1679a7f3a650a05941b0dd175d2126042cb92dbb696c523f9572f3c9"
Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.057125 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qntx2"
Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.069911 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ed8fd8e-4ded-4661-af4b-64c6e80cc613-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5ed8fd8e-4ded-4661-af4b-64c6e80cc613" (UID: "5ed8fd8e-4ded-4661-af4b-64c6e80cc613"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.075767 4929 scope.go:117] "RemoveContainer" containerID="84d6778cb93f88f4ebe3f0d63ff8dc8b4870fc448fbbf2be034159e551f887ae"
Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.095250 4929 scope.go:117] "RemoveContainer" containerID="e386662a3a5f30a08dba3fee59479ee212a49ed78e90fe894e6e29f820e52fc4"
Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.108574 4929 scope.go:117] "RemoveContainer" containerID="84f8a2dc1679a7f3a650a05941b0dd175d2126042cb92dbb696c523f9572f3c9"
Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.108786 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ed8fd8e-4ded-4661-af4b-64c6e80cc613-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.108820 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ed8fd8e-4ded-4661-af4b-64c6e80cc613-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.108836 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-csstg\" (UniqueName: \"kubernetes.io/projected/5ed8fd8e-4ded-4661-af4b-64c6e80cc613-kube-api-access-csstg\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:21 crc kubenswrapper[4929]: E1122 07:17:21.109566 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84f8a2dc1679a7f3a650a05941b0dd175d2126042cb92dbb696c523f9572f3c9\": container with ID starting with 84f8a2dc1679a7f3a650a05941b0dd175d2126042cb92dbb696c523f9572f3c9 not found: ID does not exist" containerID="84f8a2dc1679a7f3a650a05941b0dd175d2126042cb92dbb696c523f9572f3c9"
Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.109595 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84f8a2dc1679a7f3a650a05941b0dd175d2126042cb92dbb696c523f9572f3c9"} err="failed to get container status \"84f8a2dc1679a7f3a650a05941b0dd175d2126042cb92dbb696c523f9572f3c9\": rpc error: code = NotFound desc = could not find container \"84f8a2dc1679a7f3a650a05941b0dd175d2126042cb92dbb696c523f9572f3c9\": container with ID starting with 84f8a2dc1679a7f3a650a05941b0dd175d2126042cb92dbb696c523f9572f3c9 not found: ID does not exist"
Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.109631 4929 scope.go:117] "RemoveContainer" containerID="84d6778cb93f88f4ebe3f0d63ff8dc8b4870fc448fbbf2be034159e551f887ae"
Nov 22 07:17:21 crc kubenswrapper[4929]: E1122 07:17:21.110026 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84d6778cb93f88f4ebe3f0d63ff8dc8b4870fc448fbbf2be034159e551f887ae\": container with ID starting with 84d6778cb93f88f4ebe3f0d63ff8dc8b4870fc448fbbf2be034159e551f887ae not found: ID does not exist" containerID="84d6778cb93f88f4ebe3f0d63ff8dc8b4870fc448fbbf2be034159e551f887ae"
containerID="84d6778cb93f88f4ebe3f0d63ff8dc8b4870fc448fbbf2be034159e551f887ae" Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.110078 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84d6778cb93f88f4ebe3f0d63ff8dc8b4870fc448fbbf2be034159e551f887ae"} err="failed to get container status \"84d6778cb93f88f4ebe3f0d63ff8dc8b4870fc448fbbf2be034159e551f887ae\": rpc error: code = NotFound desc = could not find container \"84d6778cb93f88f4ebe3f0d63ff8dc8b4870fc448fbbf2be034159e551f887ae\": container with ID starting with 84d6778cb93f88f4ebe3f0d63ff8dc8b4870fc448fbbf2be034159e551f887ae not found: ID does not exist" Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.110097 4929 scope.go:117] "RemoveContainer" containerID="e386662a3a5f30a08dba3fee59479ee212a49ed78e90fe894e6e29f820e52fc4" Nov 22 07:17:21 crc kubenswrapper[4929]: E1122 07:17:21.110504 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e386662a3a5f30a08dba3fee59479ee212a49ed78e90fe894e6e29f820e52fc4\": container with ID starting with e386662a3a5f30a08dba3fee59479ee212a49ed78e90fe894e6e29f820e52fc4 not found: ID does not exist" containerID="e386662a3a5f30a08dba3fee59479ee212a49ed78e90fe894e6e29f820e52fc4" Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.110558 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e386662a3a5f30a08dba3fee59479ee212a49ed78e90fe894e6e29f820e52fc4"} err="failed to get container status \"e386662a3a5f30a08dba3fee59479ee212a49ed78e90fe894e6e29f820e52fc4\": rpc error: code = NotFound desc = could not find container \"e386662a3a5f30a08dba3fee59479ee212a49ed78e90fe894e6e29f820e52fc4\": container with ID starting with e386662a3a5f30a08dba3fee59479ee212a49ed78e90fe894e6e29f820e52fc4 not found: ID does not exist" Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.402360 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qntx2"] Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.408598 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qntx2"] Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.414619 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p6btf"] Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.414875 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-p6btf" podUID="ce22e4ba-966e-46dc-a50f-cc3905f3da7c" containerName="registry-server" containerID="cri-o://c2b5cc49c78dbcfe7c477d85bca85ac4f98aa0dc08c085141c1b79c9aa93ed2f" gracePeriod=2 Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.956612 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ddb775a-6361-405f-9ea9-63d22b9a4f79" path="/var/lib/kubelet/pods/3ddb775a-6361-405f-9ea9-63d22b9a4f79/volumes" Nov 22 07:17:21 crc kubenswrapper[4929]: I1122 07:17:21.957522 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ed8fd8e-4ded-4661-af4b-64c6e80cc613" path="/var/lib/kubelet/pods/5ed8fd8e-4ded-4661-af4b-64c6e80cc613/volumes" Nov 22 07:17:22 crc kubenswrapper[4929]: I1122 07:17:22.066667 4929 generic.go:334] "Generic (PLEG): container finished" podID="ce22e4ba-966e-46dc-a50f-cc3905f3da7c" containerID="c2b5cc49c78dbcfe7c477d85bca85ac4f98aa0dc08c085141c1b79c9aa93ed2f" exitCode=0 
Nov 22 07:17:22 crc kubenswrapper[4929]: I1122 07:17:22.066754 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p6btf" event={"ID":"ce22e4ba-966e-46dc-a50f-cc3905f3da7c","Type":"ContainerDied","Data":"c2b5cc49c78dbcfe7c477d85bca85ac4f98aa0dc08c085141c1b79c9aa93ed2f"} Nov 22 07:17:22 crc kubenswrapper[4929]: I1122 07:17:22.262134 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p6btf" Nov 22 07:17:22 crc kubenswrapper[4929]: I1122 07:17:22.429019 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce22e4ba-966e-46dc-a50f-cc3905f3da7c-catalog-content\") pod \"ce22e4ba-966e-46dc-a50f-cc3905f3da7c\" (UID: \"ce22e4ba-966e-46dc-a50f-cc3905f3da7c\") " Nov 22 07:17:22 crc kubenswrapper[4929]: I1122 07:17:22.429168 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sqz6b\" (UniqueName: \"kubernetes.io/projected/ce22e4ba-966e-46dc-a50f-cc3905f3da7c-kube-api-access-sqz6b\") pod \"ce22e4ba-966e-46dc-a50f-cc3905f3da7c\" (UID: \"ce22e4ba-966e-46dc-a50f-cc3905f3da7c\") " Nov 22 07:17:22 crc kubenswrapper[4929]: I1122 07:17:22.429243 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce22e4ba-966e-46dc-a50f-cc3905f3da7c-utilities\") pod \"ce22e4ba-966e-46dc-a50f-cc3905f3da7c\" (UID: \"ce22e4ba-966e-46dc-a50f-cc3905f3da7c\") " Nov 22 07:17:22 crc kubenswrapper[4929]: I1122 07:17:22.429879 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce22e4ba-966e-46dc-a50f-cc3905f3da7c-utilities" (OuterVolumeSpecName: "utilities") pod "ce22e4ba-966e-46dc-a50f-cc3905f3da7c" (UID: "ce22e4ba-966e-46dc-a50f-cc3905f3da7c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:17:22 crc kubenswrapper[4929]: I1122 07:17:22.434587 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce22e4ba-966e-46dc-a50f-cc3905f3da7c-kube-api-access-sqz6b" (OuterVolumeSpecName: "kube-api-access-sqz6b") pod "ce22e4ba-966e-46dc-a50f-cc3905f3da7c" (UID: "ce22e4ba-966e-46dc-a50f-cc3905f3da7c"). InnerVolumeSpecName "kube-api-access-sqz6b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:17:22 crc kubenswrapper[4929]: I1122 07:17:22.445888 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ce22e4ba-966e-46dc-a50f-cc3905f3da7c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ce22e4ba-966e-46dc-a50f-cc3905f3da7c" (UID: "ce22e4ba-966e-46dc-a50f-cc3905f3da7c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:17:22 crc kubenswrapper[4929]: I1122 07:17:22.530332 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ce22e4ba-966e-46dc-a50f-cc3905f3da7c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 07:17:22 crc kubenswrapper[4929]: I1122 07:17:22.530360 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sqz6b\" (UniqueName: \"kubernetes.io/projected/ce22e4ba-966e-46dc-a50f-cc3905f3da7c-kube-api-access-sqz6b\") on node \"crc\" DevicePath \"\"" Nov 22 07:17:22 crc kubenswrapper[4929]: I1122 07:17:22.530372 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ce22e4ba-966e-46dc-a50f-cc3905f3da7c-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 07:17:23 crc kubenswrapper[4929]: I1122 07:17:23.077016 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p6btf" event={"ID":"ce22e4ba-966e-46dc-a50f-cc3905f3da7c","Type":"ContainerDied","Data":"f7c900699fc1e32fe08bd4386b16614ca2c26a641fa06d585516915745b01a16"} Nov 22 07:17:23 crc kubenswrapper[4929]: I1122 07:17:23.077093 4929 scope.go:117] "RemoveContainer" containerID="c2b5cc49c78dbcfe7c477d85bca85ac4f98aa0dc08c085141c1b79c9aa93ed2f" Nov 22 07:17:23 crc kubenswrapper[4929]: I1122 07:17:23.077099 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p6btf" Nov 22 07:17:23 crc kubenswrapper[4929]: I1122 07:17:23.095493 4929 scope.go:117] "RemoveContainer" containerID="b5d22fb4a772845dbe34dbf333a51715780867eda5fc8e5186ec403879a20102" Nov 22 07:17:23 crc kubenswrapper[4929]: I1122 07:17:23.110728 4929 scope.go:117] "RemoveContainer" containerID="bdb6dc678698fbabc91a694817ec1859cf64e8a27702729eaa4c10079aeab1f5" Nov 22 07:17:23 crc kubenswrapper[4929]: I1122 07:17:23.116287 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p6btf"] Nov 22 07:17:23 crc kubenswrapper[4929]: I1122 07:17:23.117727 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-p6btf"] Nov 22 07:17:23 crc kubenswrapper[4929]: I1122 07:17:23.802711 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bjwv6"] Nov 22 07:17:23 crc kubenswrapper[4929]: I1122 07:17:23.802971 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bjwv6" podUID="68254869-b4b2-4c18-980e-4c1ce48555fe" containerName="registry-server" containerID="cri-o://4d9f062ebba0fbc85d675b3eded67bef5160bb03e715389e4ad6b9e748325e26" gracePeriod=2 Nov 22 07:17:23 crc kubenswrapper[4929]: I1122 07:17:23.954957 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce22e4ba-966e-46dc-a50f-cc3905f3da7c" path="/var/lib/kubelet/pods/ce22e4ba-966e-46dc-a50f-cc3905f3da7c/volumes" Nov 22 07:17:24 crc kubenswrapper[4929]: I1122 07:17:24.085123 4929 generic.go:334] "Generic (PLEG): container finished" podID="68254869-b4b2-4c18-980e-4c1ce48555fe" containerID="4d9f062ebba0fbc85d675b3eded67bef5160bb03e715389e4ad6b9e748325e26" exitCode=0 Nov 22 07:17:24 crc kubenswrapper[4929]: I1122 07:17:24.085196 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bjwv6" 
event={"ID":"68254869-b4b2-4c18-980e-4c1ce48555fe","Type":"ContainerDied","Data":"4d9f062ebba0fbc85d675b3eded67bef5160bb03e715389e4ad6b9e748325e26"} Nov 22 07:17:24 crc kubenswrapper[4929]: I1122 07:17:24.776853 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bjwv6" Nov 22 07:17:24 crc kubenswrapper[4929]: I1122 07:17:24.857527 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68254869-b4b2-4c18-980e-4c1ce48555fe-catalog-content\") pod \"68254869-b4b2-4c18-980e-4c1ce48555fe\" (UID: \"68254869-b4b2-4c18-980e-4c1ce48555fe\") " Nov 22 07:17:24 crc kubenswrapper[4929]: I1122 07:17:24.857629 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lbkrt\" (UniqueName: \"kubernetes.io/projected/68254869-b4b2-4c18-980e-4c1ce48555fe-kube-api-access-lbkrt\") pod \"68254869-b4b2-4c18-980e-4c1ce48555fe\" (UID: \"68254869-b4b2-4c18-980e-4c1ce48555fe\") " Nov 22 07:17:24 crc kubenswrapper[4929]: I1122 07:17:24.857648 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68254869-b4b2-4c18-980e-4c1ce48555fe-utilities\") pod \"68254869-b4b2-4c18-980e-4c1ce48555fe\" (UID: \"68254869-b4b2-4c18-980e-4c1ce48555fe\") " Nov 22 07:17:24 crc kubenswrapper[4929]: I1122 07:17:24.858556 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/68254869-b4b2-4c18-980e-4c1ce48555fe-utilities" (OuterVolumeSpecName: "utilities") pod "68254869-b4b2-4c18-980e-4c1ce48555fe" (UID: "68254869-b4b2-4c18-980e-4c1ce48555fe"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:17:24 crc kubenswrapper[4929]: I1122 07:17:24.869392 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68254869-b4b2-4c18-980e-4c1ce48555fe-kube-api-access-lbkrt" (OuterVolumeSpecName: "kube-api-access-lbkrt") pod "68254869-b4b2-4c18-980e-4c1ce48555fe" (UID: "68254869-b4b2-4c18-980e-4c1ce48555fe"). InnerVolumeSpecName "kube-api-access-lbkrt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:17:24 crc kubenswrapper[4929]: I1122 07:17:24.954173 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/68254869-b4b2-4c18-980e-4c1ce48555fe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "68254869-b4b2-4c18-980e-4c1ce48555fe" (UID: "68254869-b4b2-4c18-980e-4c1ce48555fe"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:17:24 crc kubenswrapper[4929]: I1122 07:17:24.959225 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68254869-b4b2-4c18-980e-4c1ce48555fe-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 07:17:24 crc kubenswrapper[4929]: I1122 07:17:24.959265 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lbkrt\" (UniqueName: \"kubernetes.io/projected/68254869-b4b2-4c18-980e-4c1ce48555fe-kube-api-access-lbkrt\") on node \"crc\" DevicePath \"\"" Nov 22 07:17:24 crc kubenswrapper[4929]: I1122 07:17:24.959280 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68254869-b4b2-4c18-980e-4c1ce48555fe-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 07:17:25 crc kubenswrapper[4929]: I1122 07:17:25.093647 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bjwv6" event={"ID":"68254869-b4b2-4c18-980e-4c1ce48555fe","Type":"ContainerDied","Data":"2cfbadde9c89afc3b12a408bca3a0653cf1e69d4e613922e7f9014682f7e770e"} Nov 22 07:17:25 crc kubenswrapper[4929]: I1122 07:17:25.093697 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bjwv6" Nov 22 07:17:25 crc kubenswrapper[4929]: I1122 07:17:25.093725 4929 scope.go:117] "RemoveContainer" containerID="4d9f062ebba0fbc85d675b3eded67bef5160bb03e715389e4ad6b9e748325e26" Nov 22 07:17:25 crc kubenswrapper[4929]: I1122 07:17:25.530353 4929 scope.go:117] "RemoveContainer" containerID="013e0ed0a33d4804fafcb47022a808a0b64a633239fe2bef432b5f78c92bd984" Nov 22 07:17:25 crc kubenswrapper[4929]: I1122 07:17:25.539924 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bjwv6"] Nov 22 07:17:25 crc kubenswrapper[4929]: I1122 07:17:25.544544 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bjwv6"] Nov 22 07:17:25 crc kubenswrapper[4929]: I1122 07:17:25.567434 4929 scope.go:117] "RemoveContainer" containerID="487380db37c4eebc32c8e03fddacf74cddc690decbc8388a14d884e603b223e8" Nov 22 07:17:25 crc kubenswrapper[4929]: I1122 07:17:25.953705 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68254869-b4b2-4c18-980e-4c1ce48555fe" path="/var/lib/kubelet/pods/68254869-b4b2-4c18-980e-4c1ce48555fe/volumes" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.052304 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" podUID="7ba449ab-e58d-4b27-9f93-8dd3d784a077" containerName="oauth-openshift" containerID="cri-o://518c8535ab2cd4ab09101b99516edbcd638ae7b2016af316a3fccfcdc9b4ae24" gracePeriod=15 Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.873169 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.903606 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-75d6949b4b-j4b52"] Nov 22 07:17:45 crc kubenswrapper[4929]: E1122 07:17:45.903848 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ed8fd8e-4ded-4661-af4b-64c6e80cc613" containerName="extract-content" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.903863 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ed8fd8e-4ded-4661-af4b-64c6e80cc613" containerName="extract-content" Nov 22 07:17:45 crc kubenswrapper[4929]: E1122 07:17:45.903877 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ed8fd8e-4ded-4661-af4b-64c6e80cc613" containerName="registry-server" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.903885 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ed8fd8e-4ded-4661-af4b-64c6e80cc613" containerName="registry-server" Nov 22 07:17:45 crc kubenswrapper[4929]: E1122 07:17:45.903898 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ba449ab-e58d-4b27-9f93-8dd3d784a077" containerName="oauth-openshift" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.903905 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ba449ab-e58d-4b27-9f93-8dd3d784a077" containerName="oauth-openshift" Nov 22 07:17:45 crc kubenswrapper[4929]: E1122 07:17:45.903914 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e73fb56-aba3-457d-813f-2087e73a8ea3" containerName="collect-profiles" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.903921 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e73fb56-aba3-457d-813f-2087e73a8ea3" containerName="collect-profiles" Nov 22 07:17:45 crc kubenswrapper[4929]: E1122 07:17:45.903931 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ddb775a-6361-405f-9ea9-63d22b9a4f79" containerName="registry-server" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.903939 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ddb775a-6361-405f-9ea9-63d22b9a4f79" containerName="registry-server" Nov 22 07:17:45 crc kubenswrapper[4929]: E1122 07:17:45.903949 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68254869-b4b2-4c18-980e-4c1ce48555fe" containerName="extract-utilities" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.903957 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="68254869-b4b2-4c18-980e-4c1ce48555fe" containerName="extract-utilities" Nov 22 07:17:45 crc kubenswrapper[4929]: E1122 07:17:45.903969 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce22e4ba-966e-46dc-a50f-cc3905f3da7c" containerName="registry-server" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.903978 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce22e4ba-966e-46dc-a50f-cc3905f3da7c" containerName="registry-server" Nov 22 07:17:45 crc kubenswrapper[4929]: E1122 07:17:45.903992 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68254869-b4b2-4c18-980e-4c1ce48555fe" containerName="extract-content" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.904000 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="68254869-b4b2-4c18-980e-4c1ce48555fe" containerName="extract-content" Nov 22 07:17:45 crc kubenswrapper[4929]: E1122 07:17:45.904009 4929 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="3ddb775a-6361-405f-9ea9-63d22b9a4f79" containerName="extract-utilities" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.904016 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ddb775a-6361-405f-9ea9-63d22b9a4f79" containerName="extract-utilities" Nov 22 07:17:45 crc kubenswrapper[4929]: E1122 07:17:45.904028 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68254869-b4b2-4c18-980e-4c1ce48555fe" containerName="registry-server" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.904035 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="68254869-b4b2-4c18-980e-4c1ce48555fe" containerName="registry-server" Nov 22 07:17:45 crc kubenswrapper[4929]: E1122 07:17:45.904048 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ed8fd8e-4ded-4661-af4b-64c6e80cc613" containerName="extract-utilities" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.904056 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ed8fd8e-4ded-4661-af4b-64c6e80cc613" containerName="extract-utilities" Nov 22 07:17:45 crc kubenswrapper[4929]: E1122 07:17:45.904067 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce22e4ba-966e-46dc-a50f-cc3905f3da7c" containerName="extract-utilities" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.904075 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce22e4ba-966e-46dc-a50f-cc3905f3da7c" containerName="extract-utilities" Nov 22 07:17:45 crc kubenswrapper[4929]: E1122 07:17:45.904086 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ddb775a-6361-405f-9ea9-63d22b9a4f79" containerName="extract-content" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.904094 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ddb775a-6361-405f-9ea9-63d22b9a4f79" containerName="extract-content" Nov 22 07:17:45 crc kubenswrapper[4929]: E1122 07:17:45.904102 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce22e4ba-966e-46dc-a50f-cc3905f3da7c" containerName="extract-content" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.904109 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce22e4ba-966e-46dc-a50f-cc3905f3da7c" containerName="extract-content" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.904240 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="68254869-b4b2-4c18-980e-4c1ce48555fe" containerName="registry-server" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.904255 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce22e4ba-966e-46dc-a50f-cc3905f3da7c" containerName="registry-server" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.904263 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ed8fd8e-4ded-4661-af4b-64c6e80cc613" containerName="registry-server" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.904273 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ba449ab-e58d-4b27-9f93-8dd3d784a077" containerName="oauth-openshift" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.904281 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e73fb56-aba3-457d-813f-2087e73a8ea3" containerName="collect-profiles" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.904290 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ddb775a-6361-405f-9ea9-63d22b9a4f79" containerName="registry-server" Nov 22 07:17:45 crc 
kubenswrapper[4929]: I1122 07:17:45.904764 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.922003 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-75d6949b4b-j4b52"] Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.929881 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-template-login\") pod \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.931336 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-audit-policies\") pod \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.931569 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-router-certs\") pod \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.931625 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-session\") pod \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.932069 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-template-error\") pod \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.932108 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-service-ca\") pod \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.932136 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-trusted-ca-bundle\") pod \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.932161 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7j4rk\" (UniqueName: \"kubernetes.io/projected/7ba449ab-e58d-4b27-9f93-8dd3d784a077-kube-api-access-7j4rk\") pod \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.932192 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-cliconfig\") pod \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.932231 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-idp-0-file-data\") pod \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.932256 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7ba449ab-e58d-4b27-9f93-8dd3d784a077-audit-dir\") pod \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.932621 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "7ba449ab-e58d-4b27-9f93-8dd3d784a077" (UID: "7ba449ab-e58d-4b27-9f93-8dd3d784a077"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.933064 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7ba449ab-e58d-4b27-9f93-8dd3d784a077-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "7ba449ab-e58d-4b27-9f93-8dd3d784a077" (UID: "7ba449ab-e58d-4b27-9f93-8dd3d784a077"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.933438 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "7ba449ab-e58d-4b27-9f93-8dd3d784a077" (UID: "7ba449ab-e58d-4b27-9f93-8dd3d784a077"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.934325 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "7ba449ab-e58d-4b27-9f93-8dd3d784a077" (UID: "7ba449ab-e58d-4b27-9f93-8dd3d784a077"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.934615 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "7ba449ab-e58d-4b27-9f93-8dd3d784a077" (UID: "7ba449ab-e58d-4b27-9f93-8dd3d784a077"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.937454 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-serving-cert\") pod \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.937842 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-template-provider-selection\") pod \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.938407 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-ocp-branding-template\") pod \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\" (UID: \"7ba449ab-e58d-4b27-9f93-8dd3d784a077\") " Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.938746 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-service-ca\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.938820 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a8b986fb-b396-40ed-9c60-b7f55b873991-audit-policies\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.938918 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.938965 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-router-certs\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.938996 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-cliconfig\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 
07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.939028 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a8b986fb-b396-40ed-9c60-b7f55b873991-audit-dir\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.939054 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.939123 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ncmp\" (UniqueName: \"kubernetes.io/projected/a8b986fb-b396-40ed-9c60-b7f55b873991-kube-api-access-4ncmp\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.939171 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-serving-cert\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.939223 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.939256 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.939284 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-user-template-error\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.939348 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-user-template-login\") pod 
\"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.939389 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-session\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.939477 4929 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.939498 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.939511 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.939525 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.939750 4929 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7ba449ab-e58d-4b27-9f93-8dd3d784a077-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.940844 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "7ba449ab-e58d-4b27-9f93-8dd3d784a077" (UID: "7ba449ab-e58d-4b27-9f93-8dd3d784a077"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.941492 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "7ba449ab-e58d-4b27-9f93-8dd3d784a077" (UID: "7ba449ab-e58d-4b27-9f93-8dd3d784a077"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.941938 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "7ba449ab-e58d-4b27-9f93-8dd3d784a077" (UID: "7ba449ab-e58d-4b27-9f93-8dd3d784a077"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.942692 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ba449ab-e58d-4b27-9f93-8dd3d784a077-kube-api-access-7j4rk" (OuterVolumeSpecName: "kube-api-access-7j4rk") pod "7ba449ab-e58d-4b27-9f93-8dd3d784a077" (UID: "7ba449ab-e58d-4b27-9f93-8dd3d784a077"). InnerVolumeSpecName "kube-api-access-7j4rk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.949654 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "7ba449ab-e58d-4b27-9f93-8dd3d784a077" (UID: "7ba449ab-e58d-4b27-9f93-8dd3d784a077"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.950002 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "7ba449ab-e58d-4b27-9f93-8dd3d784a077" (UID: "7ba449ab-e58d-4b27-9f93-8dd3d784a077"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.950275 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "7ba449ab-e58d-4b27-9f93-8dd3d784a077" (UID: "7ba449ab-e58d-4b27-9f93-8dd3d784a077"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.950519 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "7ba449ab-e58d-4b27-9f93-8dd3d784a077" (UID: "7ba449ab-e58d-4b27-9f93-8dd3d784a077"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:17:45 crc kubenswrapper[4929]: I1122 07:17:45.950659 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "7ba449ab-e58d-4b27-9f93-8dd3d784a077" (UID: "7ba449ab-e58d-4b27-9f93-8dd3d784a077"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.041819 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-user-template-login\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.042592 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-session\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.042877 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-service-ca\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.042981 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a8b986fb-b396-40ed-9c60-b7f55b873991-audit-policies\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.043085 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.043162 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-router-certs\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.043274 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-cliconfig\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.043363 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a8b986fb-b396-40ed-9c60-b7f55b873991-audit-dir\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" Nov 22 07:17:46 crc 
kubenswrapper[4929]: I1122 07:17:46.043450 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a8b986fb-b396-40ed-9c60-b7f55b873991-audit-dir\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.043461 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.043794 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ncmp\" (UniqueName: \"kubernetes.io/projected/a8b986fb-b396-40ed-9c60-b7f55b873991-kube-api-access-4ncmp\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.043891 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-serving-cert\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.043948 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.045195 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a8b986fb-b396-40ed-9c60-b7f55b873991-audit-policies\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.045327 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-service-ca\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.044012 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.045688 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-user-template-error\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.045853 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.046307 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-cliconfig\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.047565 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.047800 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.047839 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.047876 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.047896 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.047916 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.047934 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.047954 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7j4rk\" (UniqueName: \"kubernetes.io/projected/7ba449ab-e58d-4b27-9f93-8dd3d784a077-kube-api-access-7j4rk\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.047970 4929 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7ba449ab-e58d-4b27-9f93-8dd3d784a077-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.058448 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-serving-cert\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.059569 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.060580 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.061308 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-session\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.061388 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-router-certs\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.061856 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-user-template-login\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.062486 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-user-template-error\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.062551 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a8b986fb-b396-40ed-9c60-b7f55b873991-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.065911 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ncmp\" (UniqueName: \"kubernetes.io/projected/a8b986fb-b396-40ed-9c60-b7f55b873991-kube-api-access-4ncmp\") pod \"oauth-openshift-75d6949b4b-j4b52\" (UID: \"a8b986fb-b396-40ed-9c60-b7f55b873991\") " pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.204946 4929 generic.go:334] "Generic (PLEG): container finished" podID="7ba449ab-e58d-4b27-9f93-8dd3d784a077" containerID="518c8535ab2cd4ab09101b99516edbcd638ae7b2016af316a3fccfcdc9b4ae24" exitCode=0
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.204991 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" event={"ID":"7ba449ab-e58d-4b27-9f93-8dd3d784a077","Type":"ContainerDied","Data":"518c8535ab2cd4ab09101b99516edbcd638ae7b2016af316a3fccfcdc9b4ae24"}
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.205039 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd" event={"ID":"7ba449ab-e58d-4b27-9f93-8dd3d784a077","Type":"ContainerDied","Data":"f9a7df6ef31f81fd4ac409d18e4c80f1ab69dceee44a1018292881fcb1db595c"}
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.205059 4929 scope.go:117] "RemoveContainer" containerID="518c8535ab2cd4ab09101b99516edbcd638ae7b2016af316a3fccfcdc9b4ae24"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.205280 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-mp7vd"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.221966 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.228438 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-mp7vd"]
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.232326 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-mp7vd"]
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.233554 4929 scope.go:117] "RemoveContainer" containerID="518c8535ab2cd4ab09101b99516edbcd638ae7b2016af316a3fccfcdc9b4ae24"
Nov 22 07:17:46 crc kubenswrapper[4929]: E1122 07:17:46.234078 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"518c8535ab2cd4ab09101b99516edbcd638ae7b2016af316a3fccfcdc9b4ae24\": container with ID starting with 518c8535ab2cd4ab09101b99516edbcd638ae7b2016af316a3fccfcdc9b4ae24 not found: ID does not exist" containerID="518c8535ab2cd4ab09101b99516edbcd638ae7b2016af316a3fccfcdc9b4ae24"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.234139 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"518c8535ab2cd4ab09101b99516edbcd638ae7b2016af316a3fccfcdc9b4ae24"} err="failed to get container status \"518c8535ab2cd4ab09101b99516edbcd638ae7b2016af316a3fccfcdc9b4ae24\": rpc error: code = NotFound desc = could not find container \"518c8535ab2cd4ab09101b99516edbcd638ae7b2016af316a3fccfcdc9b4ae24\": container with ID starting with 518c8535ab2cd4ab09101b99516edbcd638ae7b2016af316a3fccfcdc9b4ae24 not found: ID does not exist"
Nov 22 07:17:46 crc kubenswrapper[4929]: I1122 07:17:46.456103 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-75d6949b4b-j4b52"]
Nov 22 07:17:47 crc kubenswrapper[4929]: I1122 07:17:47.213068 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" event={"ID":"a8b986fb-b396-40ed-9c60-b7f55b873991","Type":"ContainerStarted","Data":"a9965a9f63825abaac8a4e5fd2eb231e0fc2bc47b6663083f64044ee156872b9"}
Nov 22 07:17:47 crc kubenswrapper[4929]: I1122 07:17:47.213371 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:47 crc kubenswrapper[4929]: I1122 07:17:47.213424 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" event={"ID":"a8b986fb-b396-40ed-9c60-b7f55b873991","Type":"ContainerStarted","Data":"a73535bbbc29395f2fcb3959070b13a8ed5b6e459d55823dac34baf79dd8321f"}
Nov 22 07:17:47 crc kubenswrapper[4929]: I1122 07:17:47.238939 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52" podStartSLOduration=27.238918082 podStartE2EDuration="27.238918082s" podCreationTimestamp="2025-11-22 07:17:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:17:47.238196334 +0000 UTC m=+404.347650357" watchObservedRunningTime="2025-11-22 07:17:47.238918082 +0000 UTC m=+404.348372095"
Nov 22 07:17:47 crc kubenswrapper[4929]: I1122 07:17:47.298367 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-75d6949b4b-j4b52"
Nov 22 07:17:47 crc kubenswrapper[4929]: I1122 07:17:47.956147 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ba449ab-e58d-4b27-9f93-8dd3d784a077" path="/var/lib/kubelet/pods/7ba449ab-e58d-4b27-9f93-8dd3d784a077/volumes"
Nov 22 07:17:48 crc kubenswrapper[4929]: I1122 07:17:48.594611 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 07:17:48 crc kubenswrapper[4929]: I1122 07:17:48.594978 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.166989 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-6qjj7"]
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.168134 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.182646 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-6qjj7"]
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.220016 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3b3d7246-78a4-41a8-b503-6e36c46f681d-trusted-ca\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.220392 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3b3d7246-78a4-41a8-b503-6e36c46f681d-ca-trust-extracted\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.220498 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3b3d7246-78a4-41a8-b503-6e36c46f681d-bound-sa-token\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.220583 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2x4s\" (UniqueName: \"kubernetes.io/projected/3b3d7246-78a4-41a8-b503-6e36c46f681d-kube-api-access-h2x4s\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.220689 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.220772 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3b3d7246-78a4-41a8-b503-6e36c46f681d-installation-pull-secrets\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.220875 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3b3d7246-78a4-41a8-b503-6e36c46f681d-registry-certificates\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.220966 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3b3d7246-78a4-41a8-b503-6e36c46f681d-registry-tls\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.261255 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.322432 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3b3d7246-78a4-41a8-b503-6e36c46f681d-trusted-ca\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.322527 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3b3d7246-78a4-41a8-b503-6e36c46f681d-ca-trust-extracted\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.322554 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3b3d7246-78a4-41a8-b503-6e36c46f681d-bound-sa-token\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.322575 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2x4s\" (UniqueName: \"kubernetes.io/projected/3b3d7246-78a4-41a8-b503-6e36c46f681d-kube-api-access-h2x4s\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.322602 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3b3d7246-78a4-41a8-b503-6e36c46f681d-installation-pull-secrets\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.322946 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3b3d7246-78a4-41a8-b503-6e36c46f681d-registry-certificates\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.322981 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3b3d7246-78a4-41a8-b503-6e36c46f681d-ca-trust-extracted\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.323040 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3b3d7246-78a4-41a8-b503-6e36c46f681d-registry-tls\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.323640 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3b3d7246-78a4-41a8-b503-6e36c46f681d-trusted-ca\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.323853 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3b3d7246-78a4-41a8-b503-6e36c46f681d-registry-certificates\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.329486 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3b3d7246-78a4-41a8-b503-6e36c46f681d-registry-tls\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.329839 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3b3d7246-78a4-41a8-b503-6e36c46f681d-installation-pull-secrets\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.342593 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3b3d7246-78a4-41a8-b503-6e36c46f681d-bound-sa-token\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.353804 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2x4s\" (UniqueName: \"kubernetes.io/projected/3b3d7246-78a4-41a8-b503-6e36c46f681d-kube-api-access-h2x4s\") pod \"image-registry-66df7c8f76-6qjj7\" (UID: \"3b3d7246-78a4-41a8-b503-6e36c46f681d\") " pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.488279 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:51 crc kubenswrapper[4929]: I1122 07:17:51.676094 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-6qjj7"]
Nov 22 07:17:52 crc kubenswrapper[4929]: I1122 07:17:52.239192 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7" event={"ID":"3b3d7246-78a4-41a8-b503-6e36c46f681d","Type":"ContainerStarted","Data":"ce8f3b4d19a7947f8e8189125059a96d4006dd5b1276199de36eb2d30b399dbe"}
Nov 22 07:17:52 crc kubenswrapper[4929]: I1122 07:17:52.239554 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7"
Nov 22 07:17:52 crc kubenswrapper[4929]: I1122 07:17:52.239568 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7" event={"ID":"3b3d7246-78a4-41a8-b503-6e36c46f681d","Type":"ContainerStarted","Data":"bcb7209b03567c6ce4d08330d9eee415f27039a0a21ca8df2a507b94b7bc5a31"}
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.020072 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7" podStartSLOduration=8.020052574 podStartE2EDuration="8.020052574s" podCreationTimestamp="2025-11-22 07:17:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:17:52.258597094 +0000 UTC m=+409.368051127" watchObservedRunningTime="2025-11-22 07:17:59.020052574 +0000 UTC m=+416.129506587"
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.023093 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-94k7f"]
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.023400 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-94k7f" podUID="ffefae1e-6d13-4fe0-bc00-d9d828551582" containerName="registry-server" containerID="cri-o://052dc6b9f731183b523bdf53cb6027f4fad177bd6ab456f77cd610d79f81ed81" gracePeriod=30
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.033812 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6gdjz"]
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.034120 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6gdjz" podUID="3dd2a0c6-cab8-4398-b9ce-30fc74285bfd" containerName="registry-server" containerID="cri-o://47e807f3e5e6aa901c3b6158ce2635e04cb5c7491657465eda9313af9fd61e4c" gracePeriod=30
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.047183 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qp8xw"]
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.047482 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw" podUID="d6f53ed8-9185-4537-a9d0-ef9176e61bd4" containerName="marketplace-operator" containerID="cri-o://515788ae6ec9f8b4f46085bcfe41f5f3a1da3adff0bac2c67340ff4cb7ed66a8" gracePeriod=30
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.052752 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ddw7"]
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.053002 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4ddw7" podUID="397a142d-10a3-47ad-a662-b8effcccd19d" containerName="registry-server" containerID="cri-o://7e670c1e1a819c53fe383961a81a2897ba788eb26d2f917f349c98588d28d8b0" gracePeriod=30
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.065333 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tlqcg"]
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.065727 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tlqcg" podUID="6e8c08c4-b36e-49ee-bdfe-003d398a267e" containerName="registry-server" containerID="cri-o://e31cf1b731c4e899af8438b0fc89ca5e28edf16b3fbf8b22dab7a36b0cd4fbe5" gracePeriod=30
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.070803 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ttdfk"]
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.071549 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ttdfk"
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.076863 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ttdfk"]
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.119807 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgdst\" (UniqueName: \"kubernetes.io/projected/bce25129-0d0f-4786-96a1-5c7b902e6d71-kube-api-access-hgdst\") pod \"marketplace-operator-79b997595-ttdfk\" (UID: \"bce25129-0d0f-4786-96a1-5c7b902e6d71\") " pod="openshift-marketplace/marketplace-operator-79b997595-ttdfk"
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.119858 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bce25129-0d0f-4786-96a1-5c7b902e6d71-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-ttdfk\" (UID: \"bce25129-0d0f-4786-96a1-5c7b902e6d71\") " pod="openshift-marketplace/marketplace-operator-79b997595-ttdfk"
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.119908 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/bce25129-0d0f-4786-96a1-5c7b902e6d71-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-ttdfk\" (UID: \"bce25129-0d0f-4786-96a1-5c7b902e6d71\") " pod="openshift-marketplace/marketplace-operator-79b997595-ttdfk"
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.220562 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgdst\" (UniqueName: \"kubernetes.io/projected/bce25129-0d0f-4786-96a1-5c7b902e6d71-kube-api-access-hgdst\") pod \"marketplace-operator-79b997595-ttdfk\" (UID: \"bce25129-0d0f-4786-96a1-5c7b902e6d71\") " pod="openshift-marketplace/marketplace-operator-79b997595-ttdfk"
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.220955 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bce25129-0d0f-4786-96a1-5c7b902e6d71-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-ttdfk\" (UID: \"bce25129-0d0f-4786-96a1-5c7b902e6d71\") " pod="openshift-marketplace/marketplace-operator-79b997595-ttdfk"
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.221009 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/bce25129-0d0f-4786-96a1-5c7b902e6d71-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-ttdfk\" (UID: \"bce25129-0d0f-4786-96a1-5c7b902e6d71\") " pod="openshift-marketplace/marketplace-operator-79b997595-ttdfk"
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.222201 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bce25129-0d0f-4786-96a1-5c7b902e6d71-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-ttdfk\" (UID: \"bce25129-0d0f-4786-96a1-5c7b902e6d71\") " pod="openshift-marketplace/marketplace-operator-79b997595-ttdfk"
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.238363 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/bce25129-0d0f-4786-96a1-5c7b902e6d71-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-ttdfk\" (UID: \"bce25129-0d0f-4786-96a1-5c7b902e6d71\") " pod="openshift-marketplace/marketplace-operator-79b997595-ttdfk"
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.239914 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgdst\" (UniqueName: \"kubernetes.io/projected/bce25129-0d0f-4786-96a1-5c7b902e6d71-kube-api-access-hgdst\") pod \"marketplace-operator-79b997595-ttdfk\" (UID: \"bce25129-0d0f-4786-96a1-5c7b902e6d71\") " pod="openshift-marketplace/marketplace-operator-79b997595-ttdfk"
Nov 22 07:17:59 crc kubenswrapper[4929]: E1122 07:17:59.270414 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e31cf1b731c4e899af8438b0fc89ca5e28edf16b3fbf8b22dab7a36b0cd4fbe5 is running failed: container process not found" containerID="e31cf1b731c4e899af8438b0fc89ca5e28edf16b3fbf8b22dab7a36b0cd4fbe5" cmd=["grpc_health_probe","-addr=:50051"]
Nov 22 07:17:59 crc kubenswrapper[4929]: E1122 07:17:59.270924 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e31cf1b731c4e899af8438b0fc89ca5e28edf16b3fbf8b22dab7a36b0cd4fbe5 is running failed: container process not found" containerID="e31cf1b731c4e899af8438b0fc89ca5e28edf16b3fbf8b22dab7a36b0cd4fbe5" cmd=["grpc_health_probe","-addr=:50051"]
Nov 22 07:17:59 crc kubenswrapper[4929]: E1122 07:17:59.272377 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e31cf1b731c4e899af8438b0fc89ca5e28edf16b3fbf8b22dab7a36b0cd4fbe5 is running failed: container process not found" containerID="e31cf1b731c4e899af8438b0fc89ca5e28edf16b3fbf8b22dab7a36b0cd4fbe5" cmd=["grpc_health_probe","-addr=:50051"]
Nov 22 07:17:59 crc kubenswrapper[4929]: E1122 07:17:59.272418 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e31cf1b731c4e899af8438b0fc89ca5e28edf16b3fbf8b22dab7a36b0cd4fbe5 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-tlqcg" podUID="6e8c08c4-b36e-49ee-bdfe-003d398a267e" containerName="registry-server"
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.277239 4929 generic.go:334] "Generic (PLEG): container finished" podID="d6f53ed8-9185-4537-a9d0-ef9176e61bd4" containerID="515788ae6ec9f8b4f46085bcfe41f5f3a1da3adff0bac2c67340ff4cb7ed66a8" exitCode=0
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.277330 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw" event={"ID":"d6f53ed8-9185-4537-a9d0-ef9176e61bd4","Type":"ContainerDied","Data":"515788ae6ec9f8b4f46085bcfe41f5f3a1da3adff0bac2c67340ff4cb7ed66a8"}
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.279182 4929 generic.go:334] "Generic (PLEG): container finished" podID="397a142d-10a3-47ad-a662-b8effcccd19d" containerID="7e670c1e1a819c53fe383961a81a2897ba788eb26d2f917f349c98588d28d8b0" exitCode=0
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.279259 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ddw7" event={"ID":"397a142d-10a3-47ad-a662-b8effcccd19d","Type":"ContainerDied","Data":"7e670c1e1a819c53fe383961a81a2897ba788eb26d2f917f349c98588d28d8b0"}
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.283068 4929 generic.go:334] "Generic (PLEG): container finished" podID="6e8c08c4-b36e-49ee-bdfe-003d398a267e" containerID="e31cf1b731c4e899af8438b0fc89ca5e28edf16b3fbf8b22dab7a36b0cd4fbe5" exitCode=0
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.283137 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tlqcg" event={"ID":"6e8c08c4-b36e-49ee-bdfe-003d398a267e","Type":"ContainerDied","Data":"e31cf1b731c4e899af8438b0fc89ca5e28edf16b3fbf8b22dab7a36b0cd4fbe5"}
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.285869 4929 generic.go:334] "Generic (PLEG): container finished" podID="3dd2a0c6-cab8-4398-b9ce-30fc74285bfd" containerID="47e807f3e5e6aa901c3b6158ce2635e04cb5c7491657465eda9313af9fd61e4c" exitCode=0
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.285940 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gdjz" event={"ID":"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd","Type":"ContainerDied","Data":"47e807f3e5e6aa901c3b6158ce2635e04cb5c7491657465eda9313af9fd61e4c"}
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.296002 4929 generic.go:334] "Generic (PLEG): container finished" podID="ffefae1e-6d13-4fe0-bc00-d9d828551582" containerID="052dc6b9f731183b523bdf53cb6027f4fad177bd6ab456f77cd610d79f81ed81" exitCode=0
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.296053 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94k7f" event={"ID":"ffefae1e-6d13-4fe0-bc00-d9d828551582","Type":"ContainerDied","Data":"052dc6b9f731183b523bdf53cb6027f4fad177bd6ab456f77cd610d79f81ed81"}
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.392018 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ttdfk"
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.590135 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6gdjz"
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.608436 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-94k7f"
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.609654 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw"
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.624642 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4ddw7"
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.632392 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tlqcg"
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.662429 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ttdfk"]
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.766419 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d6f53ed8-9185-4537-a9d0-ef9176e61bd4-marketplace-operator-metrics\") pod \"d6f53ed8-9185-4537-a9d0-ef9176e61bd4\" (UID: \"d6f53ed8-9185-4537-a9d0-ef9176e61bd4\") "
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.766465 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3dd2a0c6-cab8-4398-b9ce-30fc74285bfd-utilities\") pod \"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd\" (UID: \"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd\") "
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.766518 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffefae1e-6d13-4fe0-bc00-d9d828551582-catalog-content\") pod \"ffefae1e-6d13-4fe0-bc00-d9d828551582\" (UID: \"ffefae1e-6d13-4fe0-bc00-d9d828551582\") "
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.766544 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dd2a0c6-cab8-4398-b9ce-30fc74285bfd-catalog-content\") pod \"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd\" (UID: \"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd\") "
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.766570 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4dzh6\" (UniqueName: \"kubernetes.io/projected/397a142d-10a3-47ad-a662-b8effcccd19d-kube-api-access-4dzh6\") pod \"397a142d-10a3-47ad-a662-b8effcccd19d\" (UID: \"397a142d-10a3-47ad-a662-b8effcccd19d\") "
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.766588 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dh2mw\" (UniqueName: \"kubernetes.io/projected/3dd2a0c6-cab8-4398-b9ce-30fc74285bfd-kube-api-access-dh2mw\") pod \"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd\" (UID: \"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd\") "
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.766609 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkpkg\" (UniqueName: \"kubernetes.io/projected/d6f53ed8-9185-4537-a9d0-ef9176e61bd4-kube-api-access-mkpkg\") pod \"d6f53ed8-9185-4537-a9d0-ef9176e61bd4\" (UID: \"d6f53ed8-9185-4537-a9d0-ef9176e61bd4\") "
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.766636 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d6f53ed8-9185-4537-a9d0-ef9176e61bd4-marketplace-trusted-ca\") pod \"d6f53ed8-9185-4537-a9d0-ef9176e61bd4\" (UID: \"d6f53ed8-9185-4537-a9d0-ef9176e61bd4\") "
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.766664 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-94hdq\" (UniqueName: \"kubernetes.io/projected/6e8c08c4-b36e-49ee-bdfe-003d398a267e-kube-api-access-94hdq\") pod \"6e8c08c4-b36e-49ee-bdfe-003d398a267e\" (UID: \"6e8c08c4-b36e-49ee-bdfe-003d398a267e\") "
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.766708 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftqds\" (UniqueName: \"kubernetes.io/projected/ffefae1e-6d13-4fe0-bc00-d9d828551582-kube-api-access-ftqds\") pod \"ffefae1e-6d13-4fe0-bc00-d9d828551582\" (UID: \"ffefae1e-6d13-4fe0-bc00-d9d828551582\") "
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.766741 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/397a142d-10a3-47ad-a662-b8effcccd19d-catalog-content\") pod \"397a142d-10a3-47ad-a662-b8effcccd19d\" (UID: \"397a142d-10a3-47ad-a662-b8effcccd19d\") "
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.766764 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e8c08c4-b36e-49ee-bdfe-003d398a267e-catalog-content\") pod \"6e8c08c4-b36e-49ee-bdfe-003d398a267e\" (UID: \"6e8c08c4-b36e-49ee-bdfe-003d398a267e\") "
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.766794 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/397a142d-10a3-47ad-a662-b8effcccd19d-utilities\") pod \"397a142d-10a3-47ad-a662-b8effcccd19d\" (UID: \"397a142d-10a3-47ad-a662-b8effcccd19d\") "
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.766829 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffefae1e-6d13-4fe0-bc00-d9d828551582-utilities\") pod \"ffefae1e-6d13-4fe0-bc00-d9d828551582\" (UID: \"ffefae1e-6d13-4fe0-bc00-d9d828551582\") "
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.766847 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e8c08c4-b36e-49ee-bdfe-003d398a267e-utilities\") pod \"6e8c08c4-b36e-49ee-bdfe-003d398a267e\" (UID: \"6e8c08c4-b36e-49ee-bdfe-003d398a267e\") "
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.767371 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3dd2a0c6-cab8-4398-b9ce-30fc74285bfd-utilities" (OuterVolumeSpecName: "utilities") pod "3dd2a0c6-cab8-4398-b9ce-30fc74285bfd" (UID: "3dd2a0c6-cab8-4398-b9ce-30fc74285bfd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.767793 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d6f53ed8-9185-4537-a9d0-ef9176e61bd4-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "d6f53ed8-9185-4537-a9d0-ef9176e61bd4" (UID: "d6f53ed8-9185-4537-a9d0-ef9176e61bd4"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.768086 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e8c08c4-b36e-49ee-bdfe-003d398a267e-utilities" (OuterVolumeSpecName: "utilities") pod "6e8c08c4-b36e-49ee-bdfe-003d398a267e" (UID: "6e8c08c4-b36e-49ee-bdfe-003d398a267e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.771744 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffefae1e-6d13-4fe0-bc00-d9d828551582-utilities" (OuterVolumeSpecName: "utilities") pod "ffefae1e-6d13-4fe0-bc00-d9d828551582" (UID: "ffefae1e-6d13-4fe0-bc00-d9d828551582"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.771991 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6f53ed8-9185-4537-a9d0-ef9176e61bd4-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "d6f53ed8-9185-4537-a9d0-ef9176e61bd4" (UID: "d6f53ed8-9185-4537-a9d0-ef9176e61bd4"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.772438 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/397a142d-10a3-47ad-a662-b8effcccd19d-kube-api-access-4dzh6" (OuterVolumeSpecName: "kube-api-access-4dzh6") pod "397a142d-10a3-47ad-a662-b8effcccd19d" (UID: "397a142d-10a3-47ad-a662-b8effcccd19d"). InnerVolumeSpecName "kube-api-access-4dzh6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.772972 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e8c08c4-b36e-49ee-bdfe-003d398a267e-kube-api-access-94hdq" (OuterVolumeSpecName: "kube-api-access-94hdq") pod "6e8c08c4-b36e-49ee-bdfe-003d398a267e" (UID: "6e8c08c4-b36e-49ee-bdfe-003d398a267e"). InnerVolumeSpecName "kube-api-access-94hdq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.773310 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3dd2a0c6-cab8-4398-b9ce-30fc74285bfd-kube-api-access-dh2mw" (OuterVolumeSpecName: "kube-api-access-dh2mw") pod "3dd2a0c6-cab8-4398-b9ce-30fc74285bfd" (UID: "3dd2a0c6-cab8-4398-b9ce-30fc74285bfd"). InnerVolumeSpecName "kube-api-access-dh2mw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.775333 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6f53ed8-9185-4537-a9d0-ef9176e61bd4-kube-api-access-mkpkg" (OuterVolumeSpecName: "kube-api-access-mkpkg") pod "d6f53ed8-9185-4537-a9d0-ef9176e61bd4" (UID: "d6f53ed8-9185-4537-a9d0-ef9176e61bd4"). InnerVolumeSpecName "kube-api-access-mkpkg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.782853 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/397a142d-10a3-47ad-a662-b8effcccd19d-utilities" (OuterVolumeSpecName: "utilities") pod "397a142d-10a3-47ad-a662-b8effcccd19d" (UID: "397a142d-10a3-47ad-a662-b8effcccd19d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.787443 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffefae1e-6d13-4fe0-bc00-d9d828551582-kube-api-access-ftqds" (OuterVolumeSpecName: "kube-api-access-ftqds") pod "ffefae1e-6d13-4fe0-bc00-d9d828551582" (UID: "ffefae1e-6d13-4fe0-bc00-d9d828551582"). InnerVolumeSpecName "kube-api-access-ftqds". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.804118 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/397a142d-10a3-47ad-a662-b8effcccd19d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "397a142d-10a3-47ad-a662-b8effcccd19d" (UID: "397a142d-10a3-47ad-a662-b8effcccd19d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.837354 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffefae1e-6d13-4fe0-bc00-d9d828551582-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ffefae1e-6d13-4fe0-bc00-d9d828551582" (UID: "ffefae1e-6d13-4fe0-bc00-d9d828551582"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.839434 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3dd2a0c6-cab8-4398-b9ce-30fc74285bfd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3dd2a0c6-cab8-4398-b9ce-30fc74285bfd" (UID: "3dd2a0c6-cab8-4398-b9ce-30fc74285bfd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.868228 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3dd2a0c6-cab8-4398-b9ce-30fc74285bfd-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.868264 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dh2mw\" (UniqueName: \"kubernetes.io/projected/3dd2a0c6-cab8-4398-b9ce-30fc74285bfd-kube-api-access-dh2mw\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.868280 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4dzh6\" (UniqueName: \"kubernetes.io/projected/397a142d-10a3-47ad-a662-b8effcccd19d-kube-api-access-4dzh6\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.868288 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkpkg\" (UniqueName: \"kubernetes.io/projected/d6f53ed8-9185-4537-a9d0-ef9176e61bd4-kube-api-access-mkpkg\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.868314 4929 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d6f53ed8-9185-4537-a9d0-ef9176e61bd4-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.868323 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-94hdq\" (UniqueName: \"kubernetes.io/projected/6e8c08c4-b36e-49ee-bdfe-003d398a267e-kube-api-access-94hdq\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.868331 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftqds\" (UniqueName: \"kubernetes.io/projected/ffefae1e-6d13-4fe0-bc00-d9d828551582-kube-api-access-ftqds\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.868339 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/397a142d-10a3-47ad-a662-b8effcccd19d-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.868349 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/397a142d-10a3-47ad-a662-b8effcccd19d-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.868357 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ffefae1e-6d13-4fe0-bc00-d9d828551582-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.868364 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e8c08c4-b36e-49ee-bdfe-003d398a267e-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.868388 4929 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d6f53ed8-9185-4537-a9d0-ef9176e61bd4-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\""
\"kubernetes.io/empty-dir/3dd2a0c6-cab8-4398-b9ce-30fc74285bfd-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.868404 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ffefae1e-6d13-4fe0-bc00-d9d828551582-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.926062 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e8c08c4-b36e-49ee-bdfe-003d398a267e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6e8c08c4-b36e-49ee-bdfe-003d398a267e" (UID: "6e8c08c4-b36e-49ee-bdfe-003d398a267e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:17:59 crc kubenswrapper[4929]: I1122 07:17:59.969812 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e8c08c4-b36e-49ee-bdfe-003d398a267e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.302434 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-94k7f" event={"ID":"ffefae1e-6d13-4fe0-bc00-d9d828551582","Type":"ContainerDied","Data":"f054230528b8bb316e974fb6f0faef66597bfdcbd7d5c2e5f5de3ca1514de9b9"} Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.302497 4929 scope.go:117] "RemoveContainer" containerID="052dc6b9f731183b523bdf53cb6027f4fad177bd6ab456f77cd610d79f81ed81" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.302525 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-94k7f" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.303809 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.303806 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qp8xw" event={"ID":"d6f53ed8-9185-4537-a9d0-ef9176e61bd4","Type":"ContainerDied","Data":"c7b7a36c6f439687fef254716288fb4ecd8aea0cc79b8bc1d4f2ccd88c7bc903"} Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.307620 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ddw7" event={"ID":"397a142d-10a3-47ad-a662-b8effcccd19d","Type":"ContainerDied","Data":"82c3fcccb78a28ca52d1eedd54dd62f55506e6ed9641adc23a4aad83bf79db34"} Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.307783 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4ddw7" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.314920 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tlqcg" event={"ID":"6e8c08c4-b36e-49ee-bdfe-003d398a267e","Type":"ContainerDied","Data":"f632d41e546b7d200ee434d04bc14c734d39b38be85b450f3623b9ff5babafc5"} Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.314976 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tlqcg" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.316368 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ttdfk" event={"ID":"bce25129-0d0f-4786-96a1-5c7b902e6d71","Type":"ContainerStarted","Data":"4dfe9b00f0209479dab3169500afe4dd0f93dc87942e30155e32dddc7298ee4d"} Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.316396 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ttdfk" event={"ID":"bce25129-0d0f-4786-96a1-5c7b902e6d71","Type":"ContainerStarted","Data":"afaa68a18639be414acb2af450e01df127a331f905e9f1cdfc72ea562af5a64e"} Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.316865 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-ttdfk" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.318912 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6gdjz" event={"ID":"3dd2a0c6-cab8-4398-b9ce-30fc74285bfd","Type":"ContainerDied","Data":"ee2fd9339c5d6a80c1cb17504356f00923c61dd4de6fc2ae2ee82f4230ccc09d"} Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.318982 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6gdjz" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.319104 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-ttdfk" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.319930 4929 scope.go:117] "RemoveContainer" containerID="6d603b7aa8ee36ad5d179463a75b534c83e3fac7ed352778f64447c13b4d0022" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.326836 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-94k7f"] Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.331747 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-94k7f"] Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.340100 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qp8xw"] Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.348705 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qp8xw"] Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.358110 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ddw7"] Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.360483 4929 scope.go:117] "RemoveContainer" containerID="fb07a45f5b7ed348cfbd6c5630c823c0a53f29e76987c91c302202301347d240" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.362023 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ddw7"] Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.366903 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6gdjz"] Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.371721 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6gdjz"] Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.378108 4929 scope.go:117] "RemoveContainer" 
containerID="515788ae6ec9f8b4f46085bcfe41f5f3a1da3adff0bac2c67340ff4cb7ed66a8" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.382261 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-ttdfk" podStartSLOduration=1.382249383 podStartE2EDuration="1.382249383s" podCreationTimestamp="2025-11-22 07:17:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:18:00.378398466 +0000 UTC m=+417.487852479" watchObservedRunningTime="2025-11-22 07:18:00.382249383 +0000 UTC m=+417.491703396" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.393029 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tlqcg"] Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.399766 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tlqcg"] Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.404862 4929 scope.go:117] "RemoveContainer" containerID="7e670c1e1a819c53fe383961a81a2897ba788eb26d2f917f349c98588d28d8b0" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.418570 4929 scope.go:117] "RemoveContainer" containerID="5d44d91e5de779607db10306a1ab49cfd97189097bab2e95ab823394967ffec0" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.431122 4929 scope.go:117] "RemoveContainer" containerID="46cd5ead3cd60056da33216ada4d7ee7642ce5e1a3dd4be1eda1ff7f11c94090" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.453462 4929 scope.go:117] "RemoveContainer" containerID="e31cf1b731c4e899af8438b0fc89ca5e28edf16b3fbf8b22dab7a36b0cd4fbe5" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.475527 4929 scope.go:117] "RemoveContainer" containerID="c1d2ab4ecbecca89bdabb5a071b3b388105aae750ba91c4e0981a53c00ce44ed" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.491304 4929 scope.go:117] "RemoveContainer" containerID="6db0c80e2f56e0abe718e4791b3c1d4aeb54cdc910bc925b577e203058a0874c" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.506983 4929 scope.go:117] "RemoveContainer" containerID="47e807f3e5e6aa901c3b6158ce2635e04cb5c7491657465eda9313af9fd61e4c" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.519798 4929 scope.go:117] "RemoveContainer" containerID="45304dde650a3185710803e03bef2556a023909efee959c0ff7b1a8cd3a36867" Nov 22 07:18:00 crc kubenswrapper[4929]: I1122 07:18:00.542567 4929 scope.go:117] "RemoveContainer" containerID="c11ee559c0fd9c818c16bcd10580fd97f2d2a0d091d22a40f5fe9ae2b91860c4" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.241183 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jpvhx"] Nov 22 07:18:01 crc kubenswrapper[4929]: E1122 07:18:01.241732 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="397a142d-10a3-47ad-a662-b8effcccd19d" containerName="extract-content" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.241842 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="397a142d-10a3-47ad-a662-b8effcccd19d" containerName="extract-content" Nov 22 07:18:01 crc kubenswrapper[4929]: E1122 07:18:01.242030 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dd2a0c6-cab8-4398-b9ce-30fc74285bfd" containerName="extract-content" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.242120 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dd2a0c6-cab8-4398-b9ce-30fc74285bfd" 
containerName="extract-content" Nov 22 07:18:01 crc kubenswrapper[4929]: E1122 07:18:01.242225 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dd2a0c6-cab8-4398-b9ce-30fc74285bfd" containerName="extract-utilities" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.242319 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dd2a0c6-cab8-4398-b9ce-30fc74285bfd" containerName="extract-utilities" Nov 22 07:18:01 crc kubenswrapper[4929]: E1122 07:18:01.242400 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffefae1e-6d13-4fe0-bc00-d9d828551582" containerName="extract-content" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.242477 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffefae1e-6d13-4fe0-bc00-d9d828551582" containerName="extract-content" Nov 22 07:18:01 crc kubenswrapper[4929]: E1122 07:18:01.242587 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="397a142d-10a3-47ad-a662-b8effcccd19d" containerName="extract-utilities" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.242880 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="397a142d-10a3-47ad-a662-b8effcccd19d" containerName="extract-utilities" Nov 22 07:18:01 crc kubenswrapper[4929]: E1122 07:18:01.242988 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffefae1e-6d13-4fe0-bc00-d9d828551582" containerName="registry-server" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.243067 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffefae1e-6d13-4fe0-bc00-d9d828551582" containerName="registry-server" Nov 22 07:18:01 crc kubenswrapper[4929]: E1122 07:18:01.243165 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e8c08c4-b36e-49ee-bdfe-003d398a267e" containerName="registry-server" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.243266 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e8c08c4-b36e-49ee-bdfe-003d398a267e" containerName="registry-server" Nov 22 07:18:01 crc kubenswrapper[4929]: E1122 07:18:01.243477 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dd2a0c6-cab8-4398-b9ce-30fc74285bfd" containerName="registry-server" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.243571 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dd2a0c6-cab8-4398-b9ce-30fc74285bfd" containerName="registry-server" Nov 22 07:18:01 crc kubenswrapper[4929]: E1122 07:18:01.243659 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e8c08c4-b36e-49ee-bdfe-003d398a267e" containerName="extract-content" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.243737 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e8c08c4-b36e-49ee-bdfe-003d398a267e" containerName="extract-content" Nov 22 07:18:01 crc kubenswrapper[4929]: E1122 07:18:01.243817 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffefae1e-6d13-4fe0-bc00-d9d828551582" containerName="extract-utilities" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.243899 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffefae1e-6d13-4fe0-bc00-d9d828551582" containerName="extract-utilities" Nov 22 07:18:01 crc kubenswrapper[4929]: E1122 07:18:01.243988 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="397a142d-10a3-47ad-a662-b8effcccd19d" containerName="registry-server" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.244066 4929 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="397a142d-10a3-47ad-a662-b8effcccd19d" containerName="registry-server" Nov 22 07:18:01 crc kubenswrapper[4929]: E1122 07:18:01.244156 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6f53ed8-9185-4537-a9d0-ef9176e61bd4" containerName="marketplace-operator" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.244286 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6f53ed8-9185-4537-a9d0-ef9176e61bd4" containerName="marketplace-operator" Nov 22 07:18:01 crc kubenswrapper[4929]: E1122 07:18:01.244434 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e8c08c4-b36e-49ee-bdfe-003d398a267e" containerName="extract-utilities" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.244521 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e8c08c4-b36e-49ee-bdfe-003d398a267e" containerName="extract-utilities" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.244760 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6f53ed8-9185-4537-a9d0-ef9176e61bd4" containerName="marketplace-operator" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.244893 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e8c08c4-b36e-49ee-bdfe-003d398a267e" containerName="registry-server" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.244995 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="397a142d-10a3-47ad-a662-b8effcccd19d" containerName="registry-server" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.245126 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffefae1e-6d13-4fe0-bc00-d9d828551582" containerName="registry-server" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.245227 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="3dd2a0c6-cab8-4398-b9ce-30fc74285bfd" containerName="registry-server" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.246541 4929 util.go:30] "No sandbox for pod can be found. 
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.250966 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.254718 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jpvhx"]
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.390377 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f15451b8-3b4b-4f3b-b85c-0876b352e959-utilities\") pod \"redhat-marketplace-jpvhx\" (UID: \"f15451b8-3b4b-4f3b-b85c-0876b352e959\") " pod="openshift-marketplace/redhat-marketplace-jpvhx"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.390439 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f15451b8-3b4b-4f3b-b85c-0876b352e959-catalog-content\") pod \"redhat-marketplace-jpvhx\" (UID: \"f15451b8-3b4b-4f3b-b85c-0876b352e959\") " pod="openshift-marketplace/redhat-marketplace-jpvhx"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.390511 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fq6pp\" (UniqueName: \"kubernetes.io/projected/f15451b8-3b4b-4f3b-b85c-0876b352e959-kube-api-access-fq6pp\") pod \"redhat-marketplace-jpvhx\" (UID: \"f15451b8-3b4b-4f3b-b85c-0876b352e959\") " pod="openshift-marketplace/redhat-marketplace-jpvhx"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.460000 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cwsj2"]
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.462605 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cwsj2"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.465864 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cwsj2"]
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.466064 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.491202 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fq6pp\" (UniqueName: \"kubernetes.io/projected/f15451b8-3b4b-4f3b-b85c-0876b352e959-kube-api-access-fq6pp\") pod \"redhat-marketplace-jpvhx\" (UID: \"f15451b8-3b4b-4f3b-b85c-0876b352e959\") " pod="openshift-marketplace/redhat-marketplace-jpvhx"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.491346 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f15451b8-3b4b-4f3b-b85c-0876b352e959-utilities\") pod \"redhat-marketplace-jpvhx\" (UID: \"f15451b8-3b4b-4f3b-b85c-0876b352e959\") " pod="openshift-marketplace/redhat-marketplace-jpvhx"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.491373 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f15451b8-3b4b-4f3b-b85c-0876b352e959-catalog-content\") pod \"redhat-marketplace-jpvhx\" (UID: \"f15451b8-3b4b-4f3b-b85c-0876b352e959\") " pod="openshift-marketplace/redhat-marketplace-jpvhx"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.491904 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f15451b8-3b4b-4f3b-b85c-0876b352e959-catalog-content\") pod \"redhat-marketplace-jpvhx\" (UID: \"f15451b8-3b4b-4f3b-b85c-0876b352e959\") " pod="openshift-marketplace/redhat-marketplace-jpvhx"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.492245 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f15451b8-3b4b-4f3b-b85c-0876b352e959-utilities\") pod \"redhat-marketplace-jpvhx\" (UID: \"f15451b8-3b4b-4f3b-b85c-0876b352e959\") " pod="openshift-marketplace/redhat-marketplace-jpvhx"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.509103 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fq6pp\" (UniqueName: \"kubernetes.io/projected/f15451b8-3b4b-4f3b-b85c-0876b352e959-kube-api-access-fq6pp\") pod \"redhat-marketplace-jpvhx\" (UID: \"f15451b8-3b4b-4f3b-b85c-0876b352e959\") " pod="openshift-marketplace/redhat-marketplace-jpvhx"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.578110 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jpvhx"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.592414 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bnsl2\" (UniqueName: \"kubernetes.io/projected/ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2-kube-api-access-bnsl2\") pod \"redhat-operators-cwsj2\" (UID: \"ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2\") " pod="openshift-marketplace/redhat-operators-cwsj2"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.592494 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2-catalog-content\") pod \"redhat-operators-cwsj2\" (UID: \"ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2\") " pod="openshift-marketplace/redhat-operators-cwsj2"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.592544 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2-utilities\") pod \"redhat-operators-cwsj2\" (UID: \"ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2\") " pod="openshift-marketplace/redhat-operators-cwsj2"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.694690 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bnsl2\" (UniqueName: \"kubernetes.io/projected/ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2-kube-api-access-bnsl2\") pod \"redhat-operators-cwsj2\" (UID: \"ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2\") " pod="openshift-marketplace/redhat-operators-cwsj2"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.695306 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2-catalog-content\") pod \"redhat-operators-cwsj2\" (UID: \"ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2\") " pod="openshift-marketplace/redhat-operators-cwsj2"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.695357 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2-utilities\") pod \"redhat-operators-cwsj2\" (UID: \"ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2\") " pod="openshift-marketplace/redhat-operators-cwsj2"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.697132 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2-catalog-content\") pod \"redhat-operators-cwsj2\" (UID: \"ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2\") " pod="openshift-marketplace/redhat-operators-cwsj2"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.697882 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2-utilities\") pod \"redhat-operators-cwsj2\" (UID: \"ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2\") " pod="openshift-marketplace/redhat-operators-cwsj2"
Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.718239 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bnsl2\" (UniqueName: \"kubernetes.io/projected/ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2-kube-api-access-bnsl2\") pod \"redhat-operators-cwsj2\" (UID: \"ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2\") " pod="openshift-marketplace/redhat-operators-cwsj2"
\"ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2\") " pod="openshift-marketplace/redhat-operators-cwsj2" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.780560 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cwsj2" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.957173 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="397a142d-10a3-47ad-a662-b8effcccd19d" path="/var/lib/kubelet/pods/397a142d-10a3-47ad-a662-b8effcccd19d/volumes" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.962652 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3dd2a0c6-cab8-4398-b9ce-30fc74285bfd" path="/var/lib/kubelet/pods/3dd2a0c6-cab8-4398-b9ce-30fc74285bfd/volumes" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.963537 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e8c08c4-b36e-49ee-bdfe-003d398a267e" path="/var/lib/kubelet/pods/6e8c08c4-b36e-49ee-bdfe-003d398a267e/volumes" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.965360 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6f53ed8-9185-4537-a9d0-ef9176e61bd4" path="/var/lib/kubelet/pods/d6f53ed8-9185-4537-a9d0-ef9176e61bd4/volumes" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.966576 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffefae1e-6d13-4fe0-bc00-d9d828551582" path="/var/lib/kubelet/pods/ffefae1e-6d13-4fe0-bc00-d9d828551582/volumes" Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.967630 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cwsj2"] Nov 22 07:18:01 crc kubenswrapper[4929]: I1122 07:18:01.990200 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jpvhx"] Nov 22 07:18:02 crc kubenswrapper[4929]: W1122 07:18:02.002804 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf15451b8_3b4b_4f3b_b85c_0876b352e959.slice/crio-a43493618eb59038d810da3852289a55a4b997ea955c46ba333466a281207f48 WatchSource:0}: Error finding container a43493618eb59038d810da3852289a55a4b997ea955c46ba333466a281207f48: Status 404 returned error can't find the container with id a43493618eb59038d810da3852289a55a4b997ea955c46ba333466a281207f48 Nov 22 07:18:02 crc kubenswrapper[4929]: I1122 07:18:02.337825 4929 generic.go:334] "Generic (PLEG): container finished" podID="f15451b8-3b4b-4f3b-b85c-0876b352e959" containerID="3354421223c9e38b796c59684be6cd6ba181913c3888f65f65a29d33f27e28c2" exitCode=0 Nov 22 07:18:02 crc kubenswrapper[4929]: I1122 07:18:02.337897 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpvhx" event={"ID":"f15451b8-3b4b-4f3b-b85c-0876b352e959","Type":"ContainerDied","Data":"3354421223c9e38b796c59684be6cd6ba181913c3888f65f65a29d33f27e28c2"} Nov 22 07:18:02 crc kubenswrapper[4929]: I1122 07:18:02.338284 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpvhx" event={"ID":"f15451b8-3b4b-4f3b-b85c-0876b352e959","Type":"ContainerStarted","Data":"a43493618eb59038d810da3852289a55a4b997ea955c46ba333466a281207f48"} Nov 22 07:18:02 crc kubenswrapper[4929]: I1122 07:18:02.339929 4929 generic.go:334] "Generic (PLEG): container finished" podID="ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2" 
containerID="ff00f901c75aeb7af6f35336e7f9771e8b0b12e862f0455c9ab9d73a2dac3a21" exitCode=0 Nov 22 07:18:02 crc kubenswrapper[4929]: I1122 07:18:02.340020 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwsj2" event={"ID":"ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2","Type":"ContainerDied","Data":"ff00f901c75aeb7af6f35336e7f9771e8b0b12e862f0455c9ab9d73a2dac3a21"} Nov 22 07:18:02 crc kubenswrapper[4929]: I1122 07:18:02.340077 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwsj2" event={"ID":"ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2","Type":"ContainerStarted","Data":"8fdd7cc18e4698ac206d88f159d516e1384949c3a567d345d5d43f355b2ac9b9"} Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.365253 4929 generic.go:334] "Generic (PLEG): container finished" podID="f15451b8-3b4b-4f3b-b85c-0876b352e959" containerID="17db8f8e8362c16e9fcf9174e38ce281fe4cb903af3bc6d7973c2efd7309f315" exitCode=0 Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.365364 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpvhx" event={"ID":"f15451b8-3b4b-4f3b-b85c-0876b352e959","Type":"ContainerDied","Data":"17db8f8e8362c16e9fcf9174e38ce281fe4cb903af3bc6d7973c2efd7309f315"} Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.369649 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwsj2" event={"ID":"ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2","Type":"ContainerStarted","Data":"693b590982b53c21dd37f111f56aa2a7f2e845440509fe77dde7094987c57cc3"} Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.645318 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-sw9px"] Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.646335 4929 util.go:30] "No sandbox for pod can be found. 
Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.648275 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.664487 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sw9px"]
Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.827979 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29c338ed-e46c-47a4-bdfc-826204c08156-utilities\") pod \"community-operators-sw9px\" (UID: \"29c338ed-e46c-47a4-bdfc-826204c08156\") " pod="openshift-marketplace/community-operators-sw9px"
Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.828108 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29c338ed-e46c-47a4-bdfc-826204c08156-catalog-content\") pod \"community-operators-sw9px\" (UID: \"29c338ed-e46c-47a4-bdfc-826204c08156\") " pod="openshift-marketplace/community-operators-sw9px"
Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.828159 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mjf7h\" (UniqueName: \"kubernetes.io/projected/29c338ed-e46c-47a4-bdfc-826204c08156-kube-api-access-mjf7h\") pod \"community-operators-sw9px\" (UID: \"29c338ed-e46c-47a4-bdfc-826204c08156\") " pod="openshift-marketplace/community-operators-sw9px"
Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.848040 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-b658w"]
Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.852416 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-b658w"
Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.854778 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-b658w"]
Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.856148 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.929117 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29c338ed-e46c-47a4-bdfc-826204c08156-catalog-content\") pod \"community-operators-sw9px\" (UID: \"29c338ed-e46c-47a4-bdfc-826204c08156\") " pod="openshift-marketplace/community-operators-sw9px"
Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.929532 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mjf7h\" (UniqueName: \"kubernetes.io/projected/29c338ed-e46c-47a4-bdfc-826204c08156-kube-api-access-mjf7h\") pod \"community-operators-sw9px\" (UID: \"29c338ed-e46c-47a4-bdfc-826204c08156\") " pod="openshift-marketplace/community-operators-sw9px"
Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.929668 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29c338ed-e46c-47a4-bdfc-826204c08156-utilities\") pod \"community-operators-sw9px\" (UID: \"29c338ed-e46c-47a4-bdfc-826204c08156\") " pod="openshift-marketplace/community-operators-sw9px"
Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.929809 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29c338ed-e46c-47a4-bdfc-826204c08156-catalog-content\") pod \"community-operators-sw9px\" (UID: \"29c338ed-e46c-47a4-bdfc-826204c08156\") " pod="openshift-marketplace/community-operators-sw9px"
Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.930197 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29c338ed-e46c-47a4-bdfc-826204c08156-utilities\") pod \"community-operators-sw9px\" (UID: \"29c338ed-e46c-47a4-bdfc-826204c08156\") " pod="openshift-marketplace/community-operators-sw9px"
Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.952330 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mjf7h\" (UniqueName: \"kubernetes.io/projected/29c338ed-e46c-47a4-bdfc-826204c08156-kube-api-access-mjf7h\") pod \"community-operators-sw9px\" (UID: \"29c338ed-e46c-47a4-bdfc-826204c08156\") " pod="openshift-marketplace/community-operators-sw9px"
Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.963932 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 22 07:18:03 crc kubenswrapper[4929]: I1122 07:18:03.972200 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sw9px"
Nov 22 07:18:04 crc kubenswrapper[4929]: I1122 07:18:04.031514 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12d2673e-b955-421a-839a-56222dc85e7b-utilities\") pod \"certified-operators-b658w\" (UID: \"12d2673e-b955-421a-839a-56222dc85e7b\") " pod="openshift-marketplace/certified-operators-b658w"
Nov 22 07:18:04 crc kubenswrapper[4929]: I1122 07:18:04.032002 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7tnf\" (UniqueName: \"kubernetes.io/projected/12d2673e-b955-421a-839a-56222dc85e7b-kube-api-access-c7tnf\") pod \"certified-operators-b658w\" (UID: \"12d2673e-b955-421a-839a-56222dc85e7b\") " pod="openshift-marketplace/certified-operators-b658w"
Nov 22 07:18:04 crc kubenswrapper[4929]: I1122 07:18:04.032168 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12d2673e-b955-421a-839a-56222dc85e7b-catalog-content\") pod \"certified-operators-b658w\" (UID: \"12d2673e-b955-421a-839a-56222dc85e7b\") " pod="openshift-marketplace/certified-operators-b658w"
Nov 22 07:18:04 crc kubenswrapper[4929]: I1122 07:18:04.133684 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12d2673e-b955-421a-839a-56222dc85e7b-utilities\") pod \"certified-operators-b658w\" (UID: \"12d2673e-b955-421a-839a-56222dc85e7b\") " pod="openshift-marketplace/certified-operators-b658w"
Nov 22 07:18:04 crc kubenswrapper[4929]: I1122 07:18:04.134916 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12d2673e-b955-421a-839a-56222dc85e7b-utilities\") pod \"certified-operators-b658w\" (UID: \"12d2673e-b955-421a-839a-56222dc85e7b\") " pod="openshift-marketplace/certified-operators-b658w"
Nov 22 07:18:04 crc kubenswrapper[4929]: I1122 07:18:04.135100 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7tnf\" (UniqueName: \"kubernetes.io/projected/12d2673e-b955-421a-839a-56222dc85e7b-kube-api-access-c7tnf\") pod \"certified-operators-b658w\" (UID: \"12d2673e-b955-421a-839a-56222dc85e7b\") " pod="openshift-marketplace/certified-operators-b658w"
Nov 22 07:18:04 crc kubenswrapper[4929]: I1122 07:18:04.135609 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12d2673e-b955-421a-839a-56222dc85e7b-catalog-content\") pod \"certified-operators-b658w\" (UID: \"12d2673e-b955-421a-839a-56222dc85e7b\") " pod="openshift-marketplace/certified-operators-b658w"
Nov 22 07:18:04 crc kubenswrapper[4929]: I1122 07:18:04.135667 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12d2673e-b955-421a-839a-56222dc85e7b-catalog-content\") pod \"certified-operators-b658w\" (UID: \"12d2673e-b955-421a-839a-56222dc85e7b\") " pod="openshift-marketplace/certified-operators-b658w"
Nov 22 07:18:04 crc kubenswrapper[4929]: I1122 07:18:04.156263 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7tnf\" (UniqueName: \"kubernetes.io/projected/12d2673e-b955-421a-839a-56222dc85e7b-kube-api-access-c7tnf\") pod \"certified-operators-b658w\" (UID: \"12d2673e-b955-421a-839a-56222dc85e7b\") " pod="openshift-marketplace/certified-operators-b658w"
\"certified-operators-b658w\" (UID: \"12d2673e-b955-421a-839a-56222dc85e7b\") " pod="openshift-marketplace/certified-operators-b658w" Nov 22 07:18:04 crc kubenswrapper[4929]: I1122 07:18:04.202515 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-sw9px"] Nov 22 07:18:04 crc kubenswrapper[4929]: I1122 07:18:04.204082 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-b658w" Nov 22 07:18:04 crc kubenswrapper[4929]: W1122 07:18:04.208328 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod29c338ed_e46c_47a4_bdfc_826204c08156.slice/crio-e9570e9079cec9aa29e66874fd2bff562e3656eeacb2b3f9be6127beb352943d WatchSource:0}: Error finding container e9570e9079cec9aa29e66874fd2bff562e3656eeacb2b3f9be6127beb352943d: Status 404 returned error can't find the container with id e9570e9079cec9aa29e66874fd2bff562e3656eeacb2b3f9be6127beb352943d Nov 22 07:18:04 crc kubenswrapper[4929]: I1122 07:18:04.379195 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sw9px" event={"ID":"29c338ed-e46c-47a4-bdfc-826204c08156","Type":"ContainerStarted","Data":"e9570e9079cec9aa29e66874fd2bff562e3656eeacb2b3f9be6127beb352943d"} Nov 22 07:18:04 crc kubenswrapper[4929]: I1122 07:18:04.382045 4929 generic.go:334] "Generic (PLEG): container finished" podID="ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2" containerID="693b590982b53c21dd37f111f56aa2a7f2e845440509fe77dde7094987c57cc3" exitCode=0 Nov 22 07:18:04 crc kubenswrapper[4929]: I1122 07:18:04.382083 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwsj2" event={"ID":"ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2","Type":"ContainerDied","Data":"693b590982b53c21dd37f111f56aa2a7f2e845440509fe77dde7094987c57cc3"} Nov 22 07:18:04 crc kubenswrapper[4929]: I1122 07:18:04.408419 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-b658w"] Nov 22 07:18:04 crc kubenswrapper[4929]: W1122 07:18:04.412123 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod12d2673e_b955_421a_839a_56222dc85e7b.slice/crio-5d3e00a87bab4b662535b878cc5a1348569e3b91c57395621b4c074d6eb91319 WatchSource:0}: Error finding container 5d3e00a87bab4b662535b878cc5a1348569e3b91c57395621b4c074d6eb91319: Status 404 returned error can't find the container with id 5d3e00a87bab4b662535b878cc5a1348569e3b91c57395621b4c074d6eb91319 Nov 22 07:18:05 crc kubenswrapper[4929]: I1122 07:18:05.390032 4929 generic.go:334] "Generic (PLEG): container finished" podID="29c338ed-e46c-47a4-bdfc-826204c08156" containerID="f2c70cab1114248f2e7f8cc461972afe38a0c74ffb623332951a66b7044e75e0" exitCode=0 Nov 22 07:18:05 crc kubenswrapper[4929]: I1122 07:18:05.390083 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sw9px" event={"ID":"29c338ed-e46c-47a4-bdfc-826204c08156","Type":"ContainerDied","Data":"f2c70cab1114248f2e7f8cc461972afe38a0c74ffb623332951a66b7044e75e0"} Nov 22 07:18:05 crc kubenswrapper[4929]: I1122 07:18:05.392457 4929 generic.go:334] "Generic (PLEG): container finished" podID="12d2673e-b955-421a-839a-56222dc85e7b" containerID="cdd8daa0f81b8ce0486fa44a9b83161a45947057ed3e3a93a365f8d8e88ee94c" exitCode=0 Nov 22 07:18:05 crc kubenswrapper[4929]: I1122 07:18:05.392487 4929 
Nov 22 07:18:05 crc kubenswrapper[4929]: I1122 07:18:05.392507 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b658w" event={"ID":"12d2673e-b955-421a-839a-56222dc85e7b","Type":"ContainerStarted","Data":"5d3e00a87bab4b662535b878cc5a1348569e3b91c57395621b4c074d6eb91319"}
Nov 22 07:18:06 crc kubenswrapper[4929]: I1122 07:18:06.398191 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwsj2" event={"ID":"ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2","Type":"ContainerStarted","Data":"8d3156f20a49afeb6fb1f90eef57d0d33d301a0ec84801ecffd167bdf999e906"}
Nov 22 07:18:06 crc kubenswrapper[4929]: I1122 07:18:06.400941 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpvhx" event={"ID":"f15451b8-3b4b-4f3b-b85c-0876b352e959","Type":"ContainerStarted","Data":"d65bbd0cdf31c615efe0349329dc83cd655a872c40ce87940a94ba43580b59e8"}
Nov 22 07:18:06 crc kubenswrapper[4929]: I1122 07:18:06.416682 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-cwsj2" podStartSLOduration=2.34569632 podStartE2EDuration="5.416653369s" podCreationTimestamp="2025-11-22 07:18:01 +0000 UTC" firstStartedPulling="2025-11-22 07:18:02.341584352 +0000 UTC m=+419.451038365" lastFinishedPulling="2025-11-22 07:18:05.412541401 +0000 UTC m=+422.521995414" observedRunningTime="2025-11-22 07:18:06.414116036 +0000 UTC m=+423.523570049" watchObservedRunningTime="2025-11-22 07:18:06.416653369 +0000 UTC m=+423.526107382"
Nov 22 07:18:06 crc kubenswrapper[4929]: I1122 07:18:06.434590 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jpvhx" podStartSLOduration=2.571614231 podStartE2EDuration="5.434568709s" podCreationTimestamp="2025-11-22 07:18:01 +0000 UTC" firstStartedPulling="2025-11-22 07:18:02.339719735 +0000 UTC m=+419.449173748" lastFinishedPulling="2025-11-22 07:18:05.202674203 +0000 UTC m=+422.312128226" observedRunningTime="2025-11-22 07:18:06.430975149 +0000 UTC m=+423.540429172" watchObservedRunningTime="2025-11-22 07:18:06.434568709 +0000 UTC m=+423.544022722"
Nov 22 07:18:11 crc kubenswrapper[4929]: I1122 07:18:11.428005 4929 generic.go:334] "Generic (PLEG): container finished" podID="29c338ed-e46c-47a4-bdfc-826204c08156" containerID="01da40ba0a21ed092a8532abdd6c57e40fc13ef453e926f1a32a3fdb752acad9" exitCode=0
Nov 22 07:18:11 crc kubenswrapper[4929]: I1122 07:18:11.428121 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sw9px" event={"ID":"29c338ed-e46c-47a4-bdfc-826204c08156","Type":"ContainerDied","Data":"01da40ba0a21ed092a8532abdd6c57e40fc13ef453e926f1a32a3fdb752acad9"}
Nov 22 07:18:11 crc kubenswrapper[4929]: I1122 07:18:11.432991 4929 generic.go:334] "Generic (PLEG): container finished" podID="12d2673e-b955-421a-839a-56222dc85e7b" containerID="74ea4ff84549bab894f468b37b39f407ef27e3d37fed9c40b796ddd70c3abdd7" exitCode=0
Nov 22 07:18:11 crc kubenswrapper[4929]: I1122 07:18:11.433062 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b658w" event={"ID":"12d2673e-b955-421a-839a-56222dc85e7b","Type":"ContainerDied","Data":"74ea4ff84549bab894f468b37b39f407ef27e3d37fed9c40b796ddd70c3abdd7"}
event={"ID":"12d2673e-b955-421a-839a-56222dc85e7b","Type":"ContainerDied","Data":"74ea4ff84549bab894f468b37b39f407ef27e3d37fed9c40b796ddd70c3abdd7"} Nov 22 07:18:11 crc kubenswrapper[4929]: I1122 07:18:11.492632 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-6qjj7" Nov 22 07:18:11 crc kubenswrapper[4929]: I1122 07:18:11.580499 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jpvhx" Nov 22 07:18:11 crc kubenswrapper[4929]: I1122 07:18:11.580923 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jpvhx" Nov 22 07:18:11 crc kubenswrapper[4929]: I1122 07:18:11.583720 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-f5nrw"] Nov 22 07:18:11 crc kubenswrapper[4929]: I1122 07:18:11.629050 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jpvhx" Nov 22 07:18:11 crc kubenswrapper[4929]: I1122 07:18:11.780993 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-cwsj2" Nov 22 07:18:11 crc kubenswrapper[4929]: I1122 07:18:11.781382 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cwsj2" Nov 22 07:18:11 crc kubenswrapper[4929]: I1122 07:18:11.824778 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cwsj2" Nov 22 07:18:12 crc kubenswrapper[4929]: I1122 07:18:12.475979 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jpvhx" Nov 22 07:18:12 crc kubenswrapper[4929]: I1122 07:18:12.477081 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cwsj2" Nov 22 07:18:13 crc kubenswrapper[4929]: I1122 07:18:13.447440 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-b658w" event={"ID":"12d2673e-b955-421a-839a-56222dc85e7b","Type":"ContainerStarted","Data":"2daab2e4670083aa368da4b7f9d243b39cd146544e0f19380e6602fa5b5b927f"} Nov 22 07:18:13 crc kubenswrapper[4929]: I1122 07:18:13.453311 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sw9px" event={"ID":"29c338ed-e46c-47a4-bdfc-826204c08156","Type":"ContainerStarted","Data":"018932e68d5b6f86129db4cfd38c93d35763611cc74fff643b9430e3e3ee3955"} Nov 22 07:18:13 crc kubenswrapper[4929]: I1122 07:18:13.471932 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-b658w" podStartSLOduration=3.356662414 podStartE2EDuration="10.471908107s" podCreationTimestamp="2025-11-22 07:18:03 +0000 UTC" firstStartedPulling="2025-11-22 07:18:05.411958337 +0000 UTC m=+422.521412360" lastFinishedPulling="2025-11-22 07:18:12.52720403 +0000 UTC m=+429.636658053" observedRunningTime="2025-11-22 07:18:13.468628134 +0000 UTC m=+430.578082167" watchObservedRunningTime="2025-11-22 07:18:13.471908107 +0000 UTC m=+430.581362120" Nov 22 07:18:13 crc kubenswrapper[4929]: I1122 07:18:13.497554 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-sw9px" podStartSLOduration=2.885130897 
podStartE2EDuration="10.4975237s" podCreationTimestamp="2025-11-22 07:18:03 +0000 UTC" firstStartedPulling="2025-11-22 07:18:05.41326676 +0000 UTC m=+422.522720773" lastFinishedPulling="2025-11-22 07:18:13.025659563 +0000 UTC m=+430.135113576" observedRunningTime="2025-11-22 07:18:13.493290144 +0000 UTC m=+430.602744157" watchObservedRunningTime="2025-11-22 07:18:13.4975237 +0000 UTC m=+430.606977713" Nov 22 07:18:13 crc kubenswrapper[4929]: I1122 07:18:13.973263 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-sw9px" Nov 22 07:18:13 crc kubenswrapper[4929]: I1122 07:18:13.973308 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-sw9px" Nov 22 07:18:14 crc kubenswrapper[4929]: I1122 07:18:14.205448 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-b658w" Nov 22 07:18:14 crc kubenswrapper[4929]: I1122 07:18:14.205875 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-b658w" Nov 22 07:18:14 crc kubenswrapper[4929]: I1122 07:18:14.252181 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-b658w" Nov 22 07:18:15 crc kubenswrapper[4929]: I1122 07:18:15.007047 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-sw9px" podUID="29c338ed-e46c-47a4-bdfc-826204c08156" containerName="registry-server" probeResult="failure" output=< Nov 22 07:18:15 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 07:18:15 crc kubenswrapper[4929]: > Nov 22 07:18:18 crc kubenswrapper[4929]: I1122 07:18:18.594408 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:18:18 crc kubenswrapper[4929]: I1122 07:18:18.594471 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:18:24 crc kubenswrapper[4929]: I1122 07:18:24.019043 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-sw9px" Nov 22 07:18:24 crc kubenswrapper[4929]: I1122 07:18:24.069736 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-sw9px" Nov 22 07:18:24 crc kubenswrapper[4929]: I1122 07:18:24.252551 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-b658w" Nov 22 07:18:36 crc kubenswrapper[4929]: I1122 07:18:36.624970 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" podUID="6776df8d-1529-41a1-9474-d368b6631779" containerName="registry" containerID="cri-o://81faee2aa99235ca1861e169ed088fabdb0b08371ee9a383fcb53d870a22819a" gracePeriod=30 Nov 22 07:18:36 crc kubenswrapper[4929]: I1122 07:18:36.946374 4929 util.go:48] "No 
Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.011802 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"6776df8d-1529-41a1-9474-d368b6631779\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") "
Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.011855 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6776df8d-1529-41a1-9474-d368b6631779-registry-certificates\") pod \"6776df8d-1529-41a1-9474-d368b6631779\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") "
Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.011885 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6776df8d-1529-41a1-9474-d368b6631779-ca-trust-extracted\") pod \"6776df8d-1529-41a1-9474-d368b6631779\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") "
Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.011926 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6776df8d-1529-41a1-9474-d368b6631779-installation-pull-secrets\") pod \"6776df8d-1529-41a1-9474-d368b6631779\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") "
Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.011963 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6776df8d-1529-41a1-9474-d368b6631779-bound-sa-token\") pod \"6776df8d-1529-41a1-9474-d368b6631779\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") "
Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.012002 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6776df8d-1529-41a1-9474-d368b6631779-registry-tls\") pod \"6776df8d-1529-41a1-9474-d368b6631779\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") "
Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.012021 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwrqh\" (UniqueName: \"kubernetes.io/projected/6776df8d-1529-41a1-9474-d368b6631779-kube-api-access-wwrqh\") pod \"6776df8d-1529-41a1-9474-d368b6631779\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") "
Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.012038 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6776df8d-1529-41a1-9474-d368b6631779-trusted-ca\") pod \"6776df8d-1529-41a1-9474-d368b6631779\" (UID: \"6776df8d-1529-41a1-9474-d368b6631779\") "
Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.012979 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6776df8d-1529-41a1-9474-d368b6631779-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "6776df8d-1529-41a1-9474-d368b6631779" (UID: "6776df8d-1529-41a1-9474-d368b6631779"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.013280 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6776df8d-1529-41a1-9474-d368b6631779-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "6776df8d-1529-41a1-9474-d368b6631779" (UID: "6776df8d-1529-41a1-9474-d368b6631779"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.017984 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6776df8d-1529-41a1-9474-d368b6631779-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "6776df8d-1529-41a1-9474-d368b6631779" (UID: "6776df8d-1529-41a1-9474-d368b6631779"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.018130 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6776df8d-1529-41a1-9474-d368b6631779-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "6776df8d-1529-41a1-9474-d368b6631779" (UID: "6776df8d-1529-41a1-9474-d368b6631779"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.021561 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6776df8d-1529-41a1-9474-d368b6631779-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "6776df8d-1529-41a1-9474-d368b6631779" (UID: "6776df8d-1529-41a1-9474-d368b6631779"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.021883 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6776df8d-1529-41a1-9474-d368b6631779-kube-api-access-wwrqh" (OuterVolumeSpecName: "kube-api-access-wwrqh") pod "6776df8d-1529-41a1-9474-d368b6631779" (UID: "6776df8d-1529-41a1-9474-d368b6631779"). InnerVolumeSpecName "kube-api-access-wwrqh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.026061 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "6776df8d-1529-41a1-9474-d368b6631779" (UID: "6776df8d-1529-41a1-9474-d368b6631779"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.030313 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6776df8d-1529-41a1-9474-d368b6631779-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "6776df8d-1529-41a1-9474-d368b6631779" (UID: "6776df8d-1529-41a1-9474-d368b6631779"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.113744 4929 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/6776df8d-1529-41a1-9474-d368b6631779-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.113806 4929 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/6776df8d-1529-41a1-9474-d368b6631779-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.113816 4929 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/6776df8d-1529-41a1-9474-d368b6631779-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.113826 4929 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6776df8d-1529-41a1-9474-d368b6631779-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.113837 4929 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/6776df8d-1529-41a1-9474-d368b6631779-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.113847 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwrqh\" (UniqueName: \"kubernetes.io/projected/6776df8d-1529-41a1-9474-d368b6631779-kube-api-access-wwrqh\") on node \"crc\" DevicePath \"\"" Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.113856 4929 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6776df8d-1529-41a1-9474-d368b6631779-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.575399 4929 generic.go:334] "Generic (PLEG): container finished" podID="6776df8d-1529-41a1-9474-d368b6631779" containerID="81faee2aa99235ca1861e169ed088fabdb0b08371ee9a383fcb53d870a22819a" exitCode=0 Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.575463 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" event={"ID":"6776df8d-1529-41a1-9474-d368b6631779","Type":"ContainerDied","Data":"81faee2aa99235ca1861e169ed088fabdb0b08371ee9a383fcb53d870a22819a"} Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.575723 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-f5nrw" event={"ID":"6776df8d-1529-41a1-9474-d368b6631779","Type":"ContainerDied","Data":"65382d8278e021cb0fe9453294bb0a4f0de3bd396ccbb4ea0ea3fe94d04bb6b2"} Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.575744 4929 scope.go:117] "RemoveContainer" containerID="81faee2aa99235ca1861e169ed088fabdb0b08371ee9a383fcb53d870a22819a" Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.575490 4929 util.go:48] "No ready sandbox for pod can be found. 
Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.592746 4929 scope.go:117] "RemoveContainer" containerID="81faee2aa99235ca1861e169ed088fabdb0b08371ee9a383fcb53d870a22819a"
Nov 22 07:18:37 crc kubenswrapper[4929]: E1122 07:18:37.593185 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81faee2aa99235ca1861e169ed088fabdb0b08371ee9a383fcb53d870a22819a\": container with ID starting with 81faee2aa99235ca1861e169ed088fabdb0b08371ee9a383fcb53d870a22819a not found: ID does not exist" containerID="81faee2aa99235ca1861e169ed088fabdb0b08371ee9a383fcb53d870a22819a"
Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.593232 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81faee2aa99235ca1861e169ed088fabdb0b08371ee9a383fcb53d870a22819a"} err="failed to get container status \"81faee2aa99235ca1861e169ed088fabdb0b08371ee9a383fcb53d870a22819a\": rpc error: code = NotFound desc = could not find container \"81faee2aa99235ca1861e169ed088fabdb0b08371ee9a383fcb53d870a22819a\": container with ID starting with 81faee2aa99235ca1861e169ed088fabdb0b08371ee9a383fcb53d870a22819a not found: ID does not exist"
Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.602280 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-f5nrw"]
Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.606320 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-f5nrw"]
Nov 22 07:18:37 crc kubenswrapper[4929]: I1122 07:18:37.955183 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6776df8d-1529-41a1-9474-d368b6631779" path="/var/lib/kubelet/pods/6776df8d-1529-41a1-9474-d368b6631779/volumes"
Nov 22 07:18:48 crc kubenswrapper[4929]: I1122 07:18:48.594073 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 07:18:48 crc kubenswrapper[4929]: I1122 07:18:48.594496 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 07:18:48 crc kubenswrapper[4929]: I1122 07:18:48.594541 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dssfx"
Nov 22 07:18:48 crc kubenswrapper[4929]: I1122 07:18:48.595117 4929 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a46bacf3884d0eb2dd611997e418aa6922cdfa160410640c54f9f6eaaa0821fb"} pod="openshift-machine-config-operator/machine-config-daemon-dssfx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 22 07:18:48 crc kubenswrapper[4929]: I1122 07:18:48.595163 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" containerID="cri-o://a46bacf3884d0eb2dd611997e418aa6922cdfa160410640c54f9f6eaaa0821fb" gracePeriod=600
podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" containerID="cri-o://a46bacf3884d0eb2dd611997e418aa6922cdfa160410640c54f9f6eaaa0821fb" gracePeriod=600 Nov 22 07:18:49 crc kubenswrapper[4929]: I1122 07:18:49.644410 4929 generic.go:334] "Generic (PLEG): container finished" podID="470531cb-120c-48d9-80e1-adf074cf3055" containerID="a46bacf3884d0eb2dd611997e418aa6922cdfa160410640c54f9f6eaaa0821fb" exitCode=0 Nov 22 07:18:49 crc kubenswrapper[4929]: I1122 07:18:49.644480 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerDied","Data":"a46bacf3884d0eb2dd611997e418aa6922cdfa160410640c54f9f6eaaa0821fb"} Nov 22 07:18:49 crc kubenswrapper[4929]: I1122 07:18:49.646353 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"1bfc549cc11cef76d5c77ecfb718d116f0f725033ed35b6092cba8d369cf2b6c"} Nov 22 07:18:49 crc kubenswrapper[4929]: I1122 07:18:49.646389 4929 scope.go:117] "RemoveContainer" containerID="0a7dac2ee99e537745e22031c6135d4c0a2f6ba984d7d5768002380872b38a7e" Nov 22 07:21:18 crc kubenswrapper[4929]: I1122 07:21:18.595421 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:21:18 crc kubenswrapper[4929]: I1122 07:21:18.595852 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:21:48 crc kubenswrapper[4929]: I1122 07:21:48.594284 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:21:48 crc kubenswrapper[4929]: I1122 07:21:48.594919 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:22:18 crc kubenswrapper[4929]: I1122 07:22:18.594516 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:22:18 crc kubenswrapper[4929]: I1122 07:22:18.595189 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:22:18 
crc kubenswrapper[4929]: I1122 07:22:18.595272 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 07:22:18 crc kubenswrapper[4929]: I1122 07:22:18.595985 4929 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1bfc549cc11cef76d5c77ecfb718d116f0f725033ed35b6092cba8d369cf2b6c"} pod="openshift-machine-config-operator/machine-config-daemon-dssfx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 07:22:18 crc kubenswrapper[4929]: I1122 07:22:18.596075 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" containerID="cri-o://1bfc549cc11cef76d5c77ecfb718d116f0f725033ed35b6092cba8d369cf2b6c" gracePeriod=600 Nov 22 07:22:19 crc kubenswrapper[4929]: I1122 07:22:19.831525 4929 generic.go:334] "Generic (PLEG): container finished" podID="470531cb-120c-48d9-80e1-adf074cf3055" containerID="1bfc549cc11cef76d5c77ecfb718d116f0f725033ed35b6092cba8d369cf2b6c" exitCode=0 Nov 22 07:22:19 crc kubenswrapper[4929]: I1122 07:22:19.831598 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerDied","Data":"1bfc549cc11cef76d5c77ecfb718d116f0f725033ed35b6092cba8d369cf2b6c"} Nov 22 07:22:19 crc kubenswrapper[4929]: I1122 07:22:19.831856 4929 scope.go:117] "RemoveContainer" containerID="a46bacf3884d0eb2dd611997e418aa6922cdfa160410640c54f9f6eaaa0821fb" Nov 22 07:22:20 crc kubenswrapper[4929]: I1122 07:22:20.837809 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"80896cec93a4f79a4cc130dc8d62e6989c7e25a8b824a1964efc7835e41ed527"} Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.079259 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-hxxfj"] Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.079827 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" podUID="a917968f-39da-4359-a8cc-3f3bf28e5ab6" containerName="controller-manager" containerID="cri-o://60be21b1a40acd44b688d3a9dd22c032d3408875ffe72c6aa9b2b31f90e40903" gracePeriod=30 Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.183044 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c"] Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.183334 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" podUID="5f87df65-8531-414a-83a3-b4fb6c5059f9" containerName="route-controller-manager" containerID="cri-o://76a72d6593ebf390e6a2bd088578b3237755a678f3a60283624045d9e199c4e4" gracePeriod=30 Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.227728 4929 generic.go:334] "Generic (PLEG): container finished" podID="a917968f-39da-4359-a8cc-3f3bf28e5ab6" 
containerID="60be21b1a40acd44b688d3a9dd22c032d3408875ffe72c6aa9b2b31f90e40903" exitCode=0 Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.227782 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" event={"ID":"a917968f-39da-4359-a8cc-3f3bf28e5ab6","Type":"ContainerDied","Data":"60be21b1a40acd44b688d3a9dd22c032d3408875ffe72c6aa9b2b31f90e40903"} Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.512746 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.613144 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a917968f-39da-4359-a8cc-3f3bf28e5ab6-proxy-ca-bundles\") pod \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.613224 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d7x26\" (UniqueName: \"kubernetes.io/projected/a917968f-39da-4359-a8cc-3f3bf28e5ab6-kube-api-access-d7x26\") pod \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.613271 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a917968f-39da-4359-a8cc-3f3bf28e5ab6-config\") pod \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.613348 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a917968f-39da-4359-a8cc-3f3bf28e5ab6-client-ca\") pod \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.613432 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a917968f-39da-4359-a8cc-3f3bf28e5ab6-serving-cert\") pod \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\" (UID: \"a917968f-39da-4359-a8cc-3f3bf28e5ab6\") " Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.614120 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a917968f-39da-4359-a8cc-3f3bf28e5ab6-client-ca" (OuterVolumeSpecName: "client-ca") pod "a917968f-39da-4359-a8cc-3f3bf28e5ab6" (UID: "a917968f-39da-4359-a8cc-3f3bf28e5ab6"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.614128 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a917968f-39da-4359-a8cc-3f3bf28e5ab6-config" (OuterVolumeSpecName: "config") pod "a917968f-39da-4359-a8cc-3f3bf28e5ab6" (UID: "a917968f-39da-4359-a8cc-3f3bf28e5ab6"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.614557 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a917968f-39da-4359-a8cc-3f3bf28e5ab6-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "a917968f-39da-4359-a8cc-3f3bf28e5ab6" (UID: "a917968f-39da-4359-a8cc-3f3bf28e5ab6"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.618951 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.620255 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a917968f-39da-4359-a8cc-3f3bf28e5ab6-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a917968f-39da-4359-a8cc-3f3bf28e5ab6" (UID: "a917968f-39da-4359-a8cc-3f3bf28e5ab6"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.620368 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a917968f-39da-4359-a8cc-3f3bf28e5ab6-kube-api-access-d7x26" (OuterVolumeSpecName: "kube-api-access-d7x26") pod "a917968f-39da-4359-a8cc-3f3bf28e5ab6" (UID: "a917968f-39da-4359-a8cc-3f3bf28e5ab6"). InnerVolumeSpecName "kube-api-access-d7x26". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.714201 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f87df65-8531-414a-83a3-b4fb6c5059f9-client-ca\") pod \"5f87df65-8531-414a-83a3-b4fb6c5059f9\" (UID: \"5f87df65-8531-414a-83a3-b4fb6c5059f9\") " Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.714277 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f87df65-8531-414a-83a3-b4fb6c5059f9-config\") pod \"5f87df65-8531-414a-83a3-b4fb6c5059f9\" (UID: \"5f87df65-8531-414a-83a3-b4fb6c5059f9\") " Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.714331 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6bt2x\" (UniqueName: \"kubernetes.io/projected/5f87df65-8531-414a-83a3-b4fb6c5059f9-kube-api-access-6bt2x\") pod \"5f87df65-8531-414a-83a3-b4fb6c5059f9\" (UID: \"5f87df65-8531-414a-83a3-b4fb6c5059f9\") " Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.714368 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f87df65-8531-414a-83a3-b4fb6c5059f9-serving-cert\") pod \"5f87df65-8531-414a-83a3-b4fb6c5059f9\" (UID: \"5f87df65-8531-414a-83a3-b4fb6c5059f9\") " Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.714592 4929 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a917968f-39da-4359-a8cc-3f3bf28e5ab6-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.714611 4929 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a917968f-39da-4359-a8cc-3f3bf28e5ab6-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" 
Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.714624 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d7x26\" (UniqueName: \"kubernetes.io/projected/a917968f-39da-4359-a8cc-3f3bf28e5ab6-kube-api-access-d7x26\") on node \"crc\" DevicePath \"\"" Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.714634 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a917968f-39da-4359-a8cc-3f3bf28e5ab6-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.714643 4929 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a917968f-39da-4359-a8cc-3f3bf28e5ab6-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.715905 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f87df65-8531-414a-83a3-b4fb6c5059f9-config" (OuterVolumeSpecName: "config") pod "5f87df65-8531-414a-83a3-b4fb6c5059f9" (UID: "5f87df65-8531-414a-83a3-b4fb6c5059f9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.716051 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f87df65-8531-414a-83a3-b4fb6c5059f9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5f87df65-8531-414a-83a3-b4fb6c5059f9" (UID: "5f87df65-8531-414a-83a3-b4fb6c5059f9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.717798 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f87df65-8531-414a-83a3-b4fb6c5059f9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5f87df65-8531-414a-83a3-b4fb6c5059f9" (UID: "5f87df65-8531-414a-83a3-b4fb6c5059f9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.721519 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f87df65-8531-414a-83a3-b4fb6c5059f9-kube-api-access-6bt2x" (OuterVolumeSpecName: "kube-api-access-6bt2x") pod "5f87df65-8531-414a-83a3-b4fb6c5059f9" (UID: "5f87df65-8531-414a-83a3-b4fb6c5059f9"). InnerVolumeSpecName "kube-api-access-6bt2x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.816156 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6bt2x\" (UniqueName: \"kubernetes.io/projected/5f87df65-8531-414a-83a3-b4fb6c5059f9-kube-api-access-6bt2x\") on node \"crc\" DevicePath \"\"" Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.816198 4929 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f87df65-8531-414a-83a3-b4fb6c5059f9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.816249 4929 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f87df65-8531-414a-83a3-b4fb6c5059f9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 07:23:35 crc kubenswrapper[4929]: I1122 07:23:35.816258 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f87df65-8531-414a-83a3-b4fb6c5059f9-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.234275 4929 generic.go:334] "Generic (PLEG): container finished" podID="5f87df65-8531-414a-83a3-b4fb6c5059f9" containerID="76a72d6593ebf390e6a2bd088578b3237755a678f3a60283624045d9e199c4e4" exitCode=0 Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.234359 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" event={"ID":"5f87df65-8531-414a-83a3-b4fb6c5059f9","Type":"ContainerDied","Data":"76a72d6593ebf390e6a2bd088578b3237755a678f3a60283624045d9e199c4e4"} Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.234372 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.234391 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c" event={"ID":"5f87df65-8531-414a-83a3-b4fb6c5059f9","Type":"ContainerDied","Data":"a3794f301ac62b91919efef5b3d0703292a6c0c039ebe1169191cc47c7ec79cf"} Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.234412 4929 scope.go:117] "RemoveContainer" containerID="76a72d6593ebf390e6a2bd088578b3237755a678f3a60283624045d9e199c4e4" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.236897 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" event={"ID":"a917968f-39da-4359-a8cc-3f3bf28e5ab6","Type":"ContainerDied","Data":"ee19b405e8fb1e4f934f6e8801f9a1a35f5283378399c87fc1e0b52632cc9b7d"} Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.236976 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-hxxfj" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.250753 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c"] Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.252191 4929 scope.go:117] "RemoveContainer" containerID="76a72d6593ebf390e6a2bd088578b3237755a678f3a60283624045d9e199c4e4" Nov 22 07:23:36 crc kubenswrapper[4929]: E1122 07:23:36.252743 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76a72d6593ebf390e6a2bd088578b3237755a678f3a60283624045d9e199c4e4\": container with ID starting with 76a72d6593ebf390e6a2bd088578b3237755a678f3a60283624045d9e199c4e4 not found: ID does not exist" containerID="76a72d6593ebf390e6a2bd088578b3237755a678f3a60283624045d9e199c4e4" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.252772 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76a72d6593ebf390e6a2bd088578b3237755a678f3a60283624045d9e199c4e4"} err="failed to get container status \"76a72d6593ebf390e6a2bd088578b3237755a678f3a60283624045d9e199c4e4\": rpc error: code = NotFound desc = could not find container \"76a72d6593ebf390e6a2bd088578b3237755a678f3a60283624045d9e199c4e4\": container with ID starting with 76a72d6593ebf390e6a2bd088578b3237755a678f3a60283624045d9e199c4e4 not found: ID does not exist" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.252792 4929 scope.go:117] "RemoveContainer" containerID="60be21b1a40acd44b688d3a9dd22c032d3408875ffe72c6aa9b2b31f90e40903" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.254966 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lcg6c"] Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.264607 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-hxxfj"] Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.267475 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-hxxfj"] Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.401819 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp"] Nov 22 07:23:36 crc kubenswrapper[4929]: E1122 07:23:36.402090 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f87df65-8531-414a-83a3-b4fb6c5059f9" containerName="route-controller-manager" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.402113 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f87df65-8531-414a-83a3-b4fb6c5059f9" containerName="route-controller-manager" Nov 22 07:23:36 crc kubenswrapper[4929]: E1122 07:23:36.402129 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a917968f-39da-4359-a8cc-3f3bf28e5ab6" containerName="controller-manager" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.402136 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="a917968f-39da-4359-a8cc-3f3bf28e5ab6" containerName="controller-manager" Nov 22 07:23:36 crc kubenswrapper[4929]: E1122 07:23:36.402153 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6776df8d-1529-41a1-9474-d368b6631779" containerName="registry" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 
07:23:36.402161 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="6776df8d-1529-41a1-9474-d368b6631779" containerName="registry" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.402297 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="a917968f-39da-4359-a8cc-3f3bf28e5ab6" containerName="controller-manager" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.402313 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f87df65-8531-414a-83a3-b4fb6c5059f9" containerName="route-controller-manager" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.402327 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="6776df8d-1529-41a1-9474-d368b6631779" containerName="registry" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.402694 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.406103 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9"] Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.406861 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.407057 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.407527 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.408115 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.408157 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.408329 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.408550 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.410591 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.410749 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.411174 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.411262 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.411392 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.413308 4929 reflector.go:368] Caches populated 
for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.415674 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.417231 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp"] Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.423229 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9"] Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.524098 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8fz5\" (UniqueName: \"kubernetes.io/projected/f1e39193-e88f-4a2b-b65b-bf66afb15f26-kube-api-access-h8fz5\") pod \"route-controller-manager-b6d5f5458-4hvx9\" (UID: \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\") " pod="openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.524170 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1e39193-e88f-4a2b-b65b-bf66afb15f26-config\") pod \"route-controller-manager-b6d5f5458-4hvx9\" (UID: \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\") " pod="openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.524272 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e90f68b-04fe-4393-93ab-1e7e02a4498a-config\") pod \"controller-manager-74f9c4bf4b-xhlxp\" (UID: \"2e90f68b-04fe-4393-93ab-1e7e02a4498a\") " pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.524298 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f1e39193-e88f-4a2b-b65b-bf66afb15f26-client-ca\") pod \"route-controller-manager-b6d5f5458-4hvx9\" (UID: \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\") " pod="openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.524314 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2e90f68b-04fe-4393-93ab-1e7e02a4498a-client-ca\") pod \"controller-manager-74f9c4bf4b-xhlxp\" (UID: \"2e90f68b-04fe-4393-93ab-1e7e02a4498a\") " pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.524342 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8q6l\" (UniqueName: \"kubernetes.io/projected/2e90f68b-04fe-4393-93ab-1e7e02a4498a-kube-api-access-b8q6l\") pod \"controller-manager-74f9c4bf4b-xhlxp\" (UID: \"2e90f68b-04fe-4393-93ab-1e7e02a4498a\") " pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.524375 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/f1e39193-e88f-4a2b-b65b-bf66afb15f26-serving-cert\") pod \"route-controller-manager-b6d5f5458-4hvx9\" (UID: \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\") " pod="openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.524399 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2e90f68b-04fe-4393-93ab-1e7e02a4498a-proxy-ca-bundles\") pod \"controller-manager-74f9c4bf4b-xhlxp\" (UID: \"2e90f68b-04fe-4393-93ab-1e7e02a4498a\") " pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.524416 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2e90f68b-04fe-4393-93ab-1e7e02a4498a-serving-cert\") pod \"controller-manager-74f9c4bf4b-xhlxp\" (UID: \"2e90f68b-04fe-4393-93ab-1e7e02a4498a\") " pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.626200 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f1e39193-e88f-4a2b-b65b-bf66afb15f26-client-ca\") pod \"route-controller-manager-b6d5f5458-4hvx9\" (UID: \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\") " pod="openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.626291 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2e90f68b-04fe-4393-93ab-1e7e02a4498a-client-ca\") pod \"controller-manager-74f9c4bf4b-xhlxp\" (UID: \"2e90f68b-04fe-4393-93ab-1e7e02a4498a\") " pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.626331 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8q6l\" (UniqueName: \"kubernetes.io/projected/2e90f68b-04fe-4393-93ab-1e7e02a4498a-kube-api-access-b8q6l\") pod \"controller-manager-74f9c4bf4b-xhlxp\" (UID: \"2e90f68b-04fe-4393-93ab-1e7e02a4498a\") " pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.626381 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1e39193-e88f-4a2b-b65b-bf66afb15f26-serving-cert\") pod \"route-controller-manager-b6d5f5458-4hvx9\" (UID: \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\") " pod="openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.626410 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2e90f68b-04fe-4393-93ab-1e7e02a4498a-proxy-ca-bundles\") pod \"controller-manager-74f9c4bf4b-xhlxp\" (UID: \"2e90f68b-04fe-4393-93ab-1e7e02a4498a\") " pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.626431 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2e90f68b-04fe-4393-93ab-1e7e02a4498a-serving-cert\") pod 
\"controller-manager-74f9c4bf4b-xhlxp\" (UID: \"2e90f68b-04fe-4393-93ab-1e7e02a4498a\") " pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.626454 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8fz5\" (UniqueName: \"kubernetes.io/projected/f1e39193-e88f-4a2b-b65b-bf66afb15f26-kube-api-access-h8fz5\") pod \"route-controller-manager-b6d5f5458-4hvx9\" (UID: \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\") " pod="openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.626473 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1e39193-e88f-4a2b-b65b-bf66afb15f26-config\") pod \"route-controller-manager-b6d5f5458-4hvx9\" (UID: \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\") " pod="openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.626502 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e90f68b-04fe-4393-93ab-1e7e02a4498a-config\") pod \"controller-manager-74f9c4bf4b-xhlxp\" (UID: \"2e90f68b-04fe-4393-93ab-1e7e02a4498a\") " pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.627530 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f1e39193-e88f-4a2b-b65b-bf66afb15f26-client-ca\") pod \"route-controller-manager-b6d5f5458-4hvx9\" (UID: \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\") " pod="openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.627823 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/2e90f68b-04fe-4393-93ab-1e7e02a4498a-proxy-ca-bundles\") pod \"controller-manager-74f9c4bf4b-xhlxp\" (UID: \"2e90f68b-04fe-4393-93ab-1e7e02a4498a\") " pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.628091 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2e90f68b-04fe-4393-93ab-1e7e02a4498a-config\") pod \"controller-manager-74f9c4bf4b-xhlxp\" (UID: \"2e90f68b-04fe-4393-93ab-1e7e02a4498a\") " pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.628289 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1e39193-e88f-4a2b-b65b-bf66afb15f26-config\") pod \"route-controller-manager-b6d5f5458-4hvx9\" (UID: \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\") " pod="openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.628925 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2e90f68b-04fe-4393-93ab-1e7e02a4498a-client-ca\") pod \"controller-manager-74f9c4bf4b-xhlxp\" (UID: \"2e90f68b-04fe-4393-93ab-1e7e02a4498a\") " pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 
07:23:36.630310 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2e90f68b-04fe-4393-93ab-1e7e02a4498a-serving-cert\") pod \"controller-manager-74f9c4bf4b-xhlxp\" (UID: \"2e90f68b-04fe-4393-93ab-1e7e02a4498a\") " pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.635768 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1e39193-e88f-4a2b-b65b-bf66afb15f26-serving-cert\") pod \"route-controller-manager-b6d5f5458-4hvx9\" (UID: \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\") " pod="openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.651557 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9"] Nov 22 07:23:36 crc kubenswrapper[4929]: E1122 07:23:36.652732 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-h8fz5], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9" podUID="f1e39193-e88f-4a2b-b65b-bf66afb15f26" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.664978 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8q6l\" (UniqueName: \"kubernetes.io/projected/2e90f68b-04fe-4393-93ab-1e7e02a4498a-kube-api-access-b8q6l\") pod \"controller-manager-74f9c4bf4b-xhlxp\" (UID: \"2e90f68b-04fe-4393-93ab-1e7e02a4498a\") " pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.667507 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8fz5\" (UniqueName: \"kubernetes.io/projected/f1e39193-e88f-4a2b-b65b-bf66afb15f26-kube-api-access-h8fz5\") pod \"route-controller-manager-b6d5f5458-4hvx9\" (UID: \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\") " pod="openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.724640 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" Nov 22 07:23:36 crc kubenswrapper[4929]: I1122 07:23:36.927456 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp"] Nov 22 07:23:37 crc kubenswrapper[4929]: I1122 07:23:37.245145 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" event={"ID":"2e90f68b-04fe-4393-93ab-1e7e02a4498a","Type":"ContainerStarted","Data":"82e0439d7a6280e6692715a300b78afbde1b0859e22b810bf38a367ae6982c66"} Nov 22 07:23:37 crc kubenswrapper[4929]: I1122 07:23:37.245171 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9" Nov 22 07:23:37 crc kubenswrapper[4929]: I1122 07:23:37.253942 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9" Nov 22 07:23:37 crc kubenswrapper[4929]: I1122 07:23:37.437545 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1e39193-e88f-4a2b-b65b-bf66afb15f26-serving-cert\") pod \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\" (UID: \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\") " Nov 22 07:23:37 crc kubenswrapper[4929]: I1122 07:23:37.438043 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1e39193-e88f-4a2b-b65b-bf66afb15f26-config\") pod \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\" (UID: \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\") " Nov 22 07:23:37 crc kubenswrapper[4929]: I1122 07:23:37.438106 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f1e39193-e88f-4a2b-b65b-bf66afb15f26-client-ca\") pod \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\" (UID: \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\") " Nov 22 07:23:37 crc kubenswrapper[4929]: I1122 07:23:37.438134 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8fz5\" (UniqueName: \"kubernetes.io/projected/f1e39193-e88f-4a2b-b65b-bf66afb15f26-kube-api-access-h8fz5\") pod \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\" (UID: \"f1e39193-e88f-4a2b-b65b-bf66afb15f26\") " Nov 22 07:23:37 crc kubenswrapper[4929]: I1122 07:23:37.438862 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1e39193-e88f-4a2b-b65b-bf66afb15f26-client-ca" (OuterVolumeSpecName: "client-ca") pod "f1e39193-e88f-4a2b-b65b-bf66afb15f26" (UID: "f1e39193-e88f-4a2b-b65b-bf66afb15f26"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:23:37 crc kubenswrapper[4929]: I1122 07:23:37.439007 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1e39193-e88f-4a2b-b65b-bf66afb15f26-config" (OuterVolumeSpecName: "config") pod "f1e39193-e88f-4a2b-b65b-bf66afb15f26" (UID: "f1e39193-e88f-4a2b-b65b-bf66afb15f26"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:23:37 crc kubenswrapper[4929]: I1122 07:23:37.444023 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1e39193-e88f-4a2b-b65b-bf66afb15f26-kube-api-access-h8fz5" (OuterVolumeSpecName: "kube-api-access-h8fz5") pod "f1e39193-e88f-4a2b-b65b-bf66afb15f26" (UID: "f1e39193-e88f-4a2b-b65b-bf66afb15f26"). InnerVolumeSpecName "kube-api-access-h8fz5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:23:37 crc kubenswrapper[4929]: I1122 07:23:37.444504 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1e39193-e88f-4a2b-b65b-bf66afb15f26-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f1e39193-e88f-4a2b-b65b-bf66afb15f26" (UID: "f1e39193-e88f-4a2b-b65b-bf66afb15f26"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:23:37 crc kubenswrapper[4929]: I1122 07:23:37.540100 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1e39193-e88f-4a2b-b65b-bf66afb15f26-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:23:37 crc kubenswrapper[4929]: I1122 07:23:37.540171 4929 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f1e39193-e88f-4a2b-b65b-bf66afb15f26-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 07:23:37 crc kubenswrapper[4929]: I1122 07:23:37.540188 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8fz5\" (UniqueName: \"kubernetes.io/projected/f1e39193-e88f-4a2b-b65b-bf66afb15f26-kube-api-access-h8fz5\") on node \"crc\" DevicePath \"\"" Nov 22 07:23:37 crc kubenswrapper[4929]: I1122 07:23:37.540226 4929 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1e39193-e88f-4a2b-b65b-bf66afb15f26-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:23:37 crc kubenswrapper[4929]: I1122 07:23:37.954619 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f87df65-8531-414a-83a3-b4fb6c5059f9" path="/var/lib/kubelet/pods/5f87df65-8531-414a-83a3-b4fb6c5059f9/volumes" Nov 22 07:23:37 crc kubenswrapper[4929]: I1122 07:23:37.955767 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a917968f-39da-4359-a8cc-3f3bf28e5ab6" path="/var/lib/kubelet/pods/a917968f-39da-4359-a8cc-3f3bf28e5ab6/volumes" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.252239 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.252240 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" event={"ID":"2e90f68b-04fe-4393-93ab-1e7e02a4498a","Type":"ContainerStarted","Data":"6a12cfa15286e825c1902e2cda19a697759954b2d8e5de953f42197298f8aac3"} Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.273177 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" podStartSLOduration=3.273153875 podStartE2EDuration="3.273153875s" podCreationTimestamp="2025-11-22 07:23:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:23:38.271119493 +0000 UTC m=+755.380573526" watchObservedRunningTime="2025-11-22 07:23:38.273153875 +0000 UTC m=+755.382607888" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.298841 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p"] Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.299555 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.302458 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.302794 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9"] Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.305611 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.306008 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.309511 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b6d5f5458-4hvx9"] Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.309668 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.312256 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.315765 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.325153 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p"] Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.451049 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f91bfd2-698f-443f-aa5c-6ec41c34f157-config\") pod \"route-controller-manager-6f748bb8b5-mck8p\" (UID: \"7f91bfd2-698f-443f-aa5c-6ec41c34f157\") " pod="openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.451154 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7f91bfd2-698f-443f-aa5c-6ec41c34f157-client-ca\") pod \"route-controller-manager-6f748bb8b5-mck8p\" (UID: \"7f91bfd2-698f-443f-aa5c-6ec41c34f157\") " pod="openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.451180 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f91bfd2-698f-443f-aa5c-6ec41c34f157-serving-cert\") pod \"route-controller-manager-6f748bb8b5-mck8p\" (UID: \"7f91bfd2-698f-443f-aa5c-6ec41c34f157\") " pod="openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.451232 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gl88r\" (UniqueName: \"kubernetes.io/projected/7f91bfd2-698f-443f-aa5c-6ec41c34f157-kube-api-access-gl88r\") pod \"route-controller-manager-6f748bb8b5-mck8p\" (UID: 
\"7f91bfd2-698f-443f-aa5c-6ec41c34f157\") " pod="openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.551953 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7f91bfd2-698f-443f-aa5c-6ec41c34f157-client-ca\") pod \"route-controller-manager-6f748bb8b5-mck8p\" (UID: \"7f91bfd2-698f-443f-aa5c-6ec41c34f157\") " pod="openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.552010 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f91bfd2-698f-443f-aa5c-6ec41c34f157-serving-cert\") pod \"route-controller-manager-6f748bb8b5-mck8p\" (UID: \"7f91bfd2-698f-443f-aa5c-6ec41c34f157\") " pod="openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.552040 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gl88r\" (UniqueName: \"kubernetes.io/projected/7f91bfd2-698f-443f-aa5c-6ec41c34f157-kube-api-access-gl88r\") pod \"route-controller-manager-6f748bb8b5-mck8p\" (UID: \"7f91bfd2-698f-443f-aa5c-6ec41c34f157\") " pod="openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.552085 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f91bfd2-698f-443f-aa5c-6ec41c34f157-config\") pod \"route-controller-manager-6f748bb8b5-mck8p\" (UID: \"7f91bfd2-698f-443f-aa5c-6ec41c34f157\") " pod="openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.552799 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7f91bfd2-698f-443f-aa5c-6ec41c34f157-client-ca\") pod \"route-controller-manager-6f748bb8b5-mck8p\" (UID: \"7f91bfd2-698f-443f-aa5c-6ec41c34f157\") " pod="openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.553069 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7f91bfd2-698f-443f-aa5c-6ec41c34f157-config\") pod \"route-controller-manager-6f748bb8b5-mck8p\" (UID: \"7f91bfd2-698f-443f-aa5c-6ec41c34f157\") " pod="openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.559807 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7f91bfd2-698f-443f-aa5c-6ec41c34f157-serving-cert\") pod \"route-controller-manager-6f748bb8b5-mck8p\" (UID: \"7f91bfd2-698f-443f-aa5c-6ec41c34f157\") " pod="openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.569745 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gl88r\" (UniqueName: \"kubernetes.io/projected/7f91bfd2-698f-443f-aa5c-6ec41c34f157-kube-api-access-gl88r\") pod \"route-controller-manager-6f748bb8b5-mck8p\" (UID: \"7f91bfd2-698f-443f-aa5c-6ec41c34f157\") " 
pod="openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.619270 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p" Nov 22 07:23:38 crc kubenswrapper[4929]: I1122 07:23:38.817720 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p"] Nov 22 07:23:39 crc kubenswrapper[4929]: I1122 07:23:39.259197 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p" event={"ID":"7f91bfd2-698f-443f-aa5c-6ec41c34f157","Type":"ContainerStarted","Data":"d4b0e51424593973e09bb168331b06a72e02b980f4c2dde5995ab33fcfe08ef7"} Nov 22 07:23:39 crc kubenswrapper[4929]: I1122 07:23:39.259721 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" Nov 22 07:23:39 crc kubenswrapper[4929]: I1122 07:23:39.259781 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p" event={"ID":"7f91bfd2-698f-443f-aa5c-6ec41c34f157","Type":"ContainerStarted","Data":"01ac7e298c9bed39d7b180369a09d5801fdd62426f63173882a589e15c1bd6a2"} Nov 22 07:23:39 crc kubenswrapper[4929]: I1122 07:23:39.264266 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-74f9c4bf4b-xhlxp" Nov 22 07:23:39 crc kubenswrapper[4929]: I1122 07:23:39.278418 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p" podStartSLOduration=3.278394433 podStartE2EDuration="3.278394433s" podCreationTimestamp="2025-11-22 07:23:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:23:39.27630938 +0000 UTC m=+756.385763393" watchObservedRunningTime="2025-11-22 07:23:39.278394433 +0000 UTC m=+756.387848446" Nov 22 07:23:39 crc kubenswrapper[4929]: I1122 07:23:39.953325 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1e39193-e88f-4a2b-b65b-bf66afb15f26" path="/var/lib/kubelet/pods/f1e39193-e88f-4a2b-b65b-bf66afb15f26/volumes" Nov 22 07:23:40 crc kubenswrapper[4929]: I1122 07:23:40.264824 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p" Nov 22 07:23:40 crc kubenswrapper[4929]: I1122 07:23:40.277453 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6f748bb8b5-mck8p" Nov 22 07:23:42 crc kubenswrapper[4929]: I1122 07:23:42.551703 4929 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.259110 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-4wpmv"] Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.260434 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-4wpmv" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.263593 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-lfmhb"] Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.264403 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-lfmhb" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.264925 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.265031 4929 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-fmds8" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.264924 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.266539 4929 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-5mcnb" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.273197 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-4wpmv"] Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.278135 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-lfmhb"] Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.300307 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-b8d64"] Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.303763 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-b8d64" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.310473 4929 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-khswk" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.324486 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-b8d64"] Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.418583 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5fz8\" (UniqueName: \"kubernetes.io/projected/255da6c7-3a6c-4cda-99f3-1d8b3ed48139-kube-api-access-z5fz8\") pod \"cert-manager-webhook-5655c58dd6-b8d64\" (UID: \"255da6c7-3a6c-4cda-99f3-1d8b3ed48139\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-b8d64" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.418650 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9l7wj\" (UniqueName: \"kubernetes.io/projected/b471dac5-a291-4b5f-81fd-e17e4166a0fa-kube-api-access-9l7wj\") pod \"cert-manager-5b446d88c5-4wpmv\" (UID: \"b471dac5-a291-4b5f-81fd-e17e4166a0fa\") " pod="cert-manager/cert-manager-5b446d88c5-4wpmv" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.418688 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ljkn\" (UniqueName: \"kubernetes.io/projected/89a720f9-04b9-4275-b820-c65476faa487-kube-api-access-7ljkn\") pod \"cert-manager-cainjector-7f985d654d-lfmhb\" (UID: \"89a720f9-04b9-4275-b820-c65476faa487\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-lfmhb" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.520300 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ljkn\" (UniqueName: \"kubernetes.io/projected/89a720f9-04b9-4275-b820-c65476faa487-kube-api-access-7ljkn\") pod \"cert-manager-cainjector-7f985d654d-lfmhb\" (UID: \"89a720f9-04b9-4275-b820-c65476faa487\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-lfmhb" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.520893 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5fz8\" (UniqueName: \"kubernetes.io/projected/255da6c7-3a6c-4cda-99f3-1d8b3ed48139-kube-api-access-z5fz8\") pod \"cert-manager-webhook-5655c58dd6-b8d64\" (UID: \"255da6c7-3a6c-4cda-99f3-1d8b3ed48139\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-b8d64" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.521006 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9l7wj\" (UniqueName: \"kubernetes.io/projected/b471dac5-a291-4b5f-81fd-e17e4166a0fa-kube-api-access-9l7wj\") pod \"cert-manager-5b446d88c5-4wpmv\" (UID: \"b471dac5-a291-4b5f-81fd-e17e4166a0fa\") " pod="cert-manager/cert-manager-5b446d88c5-4wpmv" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.541868 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9l7wj\" (UniqueName: \"kubernetes.io/projected/b471dac5-a291-4b5f-81fd-e17e4166a0fa-kube-api-access-9l7wj\") pod \"cert-manager-5b446d88c5-4wpmv\" (UID: \"b471dac5-a291-4b5f-81fd-e17e4166a0fa\") " pod="cert-manager/cert-manager-5b446d88c5-4wpmv" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.543053 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-z5fz8\" (UniqueName: \"kubernetes.io/projected/255da6c7-3a6c-4cda-99f3-1d8b3ed48139-kube-api-access-z5fz8\") pod \"cert-manager-webhook-5655c58dd6-b8d64\" (UID: \"255da6c7-3a6c-4cda-99f3-1d8b3ed48139\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-b8d64" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.545886 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ljkn\" (UniqueName: \"kubernetes.io/projected/89a720f9-04b9-4275-b820-c65476faa487-kube-api-access-7ljkn\") pod \"cert-manager-cainjector-7f985d654d-lfmhb\" (UID: \"89a720f9-04b9-4275-b820-c65476faa487\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-lfmhb" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.587635 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-4wpmv" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.615447 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-lfmhb" Nov 22 07:24:05 crc kubenswrapper[4929]: I1122 07:24:05.629449 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-b8d64" Nov 22 07:24:06 crc kubenswrapper[4929]: I1122 07:24:06.040625 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-4wpmv"] Nov 22 07:24:06 crc kubenswrapper[4929]: I1122 07:24:06.044575 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-lfmhb"] Nov 22 07:24:06 crc kubenswrapper[4929]: I1122 07:24:06.054847 4929 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 07:24:06 crc kubenswrapper[4929]: I1122 07:24:06.162054 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-b8d64"] Nov 22 07:24:06 crc kubenswrapper[4929]: W1122 07:24:06.164545 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod255da6c7_3a6c_4cda_99f3_1d8b3ed48139.slice/crio-579f47e3495af4f42a04487650b443fd11db66a6037465af6f5815060c34d7da WatchSource:0}: Error finding container 579f47e3495af4f42a04487650b443fd11db66a6037465af6f5815060c34d7da: Status 404 returned error can't find the container with id 579f47e3495af4f42a04487650b443fd11db66a6037465af6f5815060c34d7da Nov 22 07:24:06 crc kubenswrapper[4929]: I1122 07:24:06.394509 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-b8d64" event={"ID":"255da6c7-3a6c-4cda-99f3-1d8b3ed48139","Type":"ContainerStarted","Data":"579f47e3495af4f42a04487650b443fd11db66a6037465af6f5815060c34d7da"} Nov 22 07:24:06 crc kubenswrapper[4929]: I1122 07:24:06.395568 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-lfmhb" event={"ID":"89a720f9-04b9-4275-b820-c65476faa487","Type":"ContainerStarted","Data":"50506e09b1cdd5cb03d12f0f77e7eddb6ed4d70af07061945b8b5c611c22becf"} Nov 22 07:24:06 crc kubenswrapper[4929]: I1122 07:24:06.396611 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-4wpmv" event={"ID":"b471dac5-a291-4b5f-81fd-e17e4166a0fa","Type":"ContainerStarted","Data":"f5bf9f935a58d5fa31e7922e0cfadef16d7864751ae7280b16822f46c45b037c"} Nov 22 07:24:15 crc kubenswrapper[4929]: I1122 
07:24:15.538757 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-vkn7m"] Nov 22 07:24:15 crc kubenswrapper[4929]: I1122 07:24:15.539740 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovn-controller" containerID="cri-o://dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359" gracePeriod=30 Nov 22 07:24:15 crc kubenswrapper[4929]: I1122 07:24:15.539800 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="nbdb" containerID="cri-o://04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f" gracePeriod=30 Nov 22 07:24:15 crc kubenswrapper[4929]: I1122 07:24:15.539869 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="northd" containerID="cri-o://1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6" gracePeriod=30 Nov 22 07:24:15 crc kubenswrapper[4929]: I1122 07:24:15.539911 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2" gracePeriod=30 Nov 22 07:24:15 crc kubenswrapper[4929]: I1122 07:24:15.539947 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="kube-rbac-proxy-node" containerID="cri-o://25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c" gracePeriod=30 Nov 22 07:24:15 crc kubenswrapper[4929]: I1122 07:24:15.539982 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovn-acl-logging" containerID="cri-o://146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12" gracePeriod=30 Nov 22 07:24:15 crc kubenswrapper[4929]: I1122 07:24:15.540295 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="sbdb" containerID="cri-o://144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892" gracePeriod=30 Nov 22 07:24:15 crc kubenswrapper[4929]: I1122 07:24:15.581861 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovnkube-controller" containerID="cri-o://465a576a7de5c1250d5df97ff9895da8db1174cb805c4416d9702732a59ccb82" gracePeriod=30 Nov 22 07:24:16 crc kubenswrapper[4929]: I1122 07:24:16.458626 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vkn7m_77273e11-9bf5-4480-bf99-266ae3f1ed7a/ovnkube-controller/2.log" Nov 22 07:24:16 crc kubenswrapper[4929]: I1122 07:24:16.461719 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vkn7m_77273e11-9bf5-4480-bf99-266ae3f1ed7a/ovn-acl-logging/0.log" Nov 22 07:24:16 crc 
kubenswrapper[4929]: I1122 07:24:16.462644 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vkn7m_77273e11-9bf5-4480-bf99-266ae3f1ed7a/ovn-controller/0.log" Nov 22 07:24:16 crc kubenswrapper[4929]: I1122 07:24:16.463827 4929 generic.go:334] "Generic (PLEG): container finished" podID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerID="465a576a7de5c1250d5df97ff9895da8db1174cb805c4416d9702732a59ccb82" exitCode=0 Nov 22 07:24:16 crc kubenswrapper[4929]: I1122 07:24:16.463865 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerDied","Data":"465a576a7de5c1250d5df97ff9895da8db1174cb805c4416d9702732a59ccb82"} Nov 22 07:24:16 crc kubenswrapper[4929]: I1122 07:24:16.464097 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerDied","Data":"86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2"} Nov 22 07:24:16 crc kubenswrapper[4929]: I1122 07:24:16.464120 4929 scope.go:117] "RemoveContainer" containerID="c2767c12d8836dca56016e73dbd1efcd722cd10f818b93ebad28312457d57ae9" Nov 22 07:24:16 crc kubenswrapper[4929]: I1122 07:24:16.464595 4929 generic.go:334] "Generic (PLEG): container finished" podID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerID="86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2" exitCode=0 Nov 22 07:24:16 crc kubenswrapper[4929]: I1122 07:24:16.465132 4929 generic.go:334] "Generic (PLEG): container finished" podID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerID="25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c" exitCode=0 Nov 22 07:24:16 crc kubenswrapper[4929]: I1122 07:24:16.465342 4929 generic.go:334] "Generic (PLEG): container finished" podID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerID="146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12" exitCode=143 Nov 22 07:24:16 crc kubenswrapper[4929]: I1122 07:24:16.465468 4929 generic.go:334] "Generic (PLEG): container finished" podID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerID="dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359" exitCode=143 Nov 22 07:24:16 crc kubenswrapper[4929]: I1122 07:24:16.465220 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerDied","Data":"25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c"} Nov 22 07:24:16 crc kubenswrapper[4929]: I1122 07:24:16.465672 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerDied","Data":"146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12"} Nov 22 07:24:16 crc kubenswrapper[4929]: I1122 07:24:16.465789 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerDied","Data":"dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359"} Nov 22 07:24:17 crc kubenswrapper[4929]: I1122 07:24:17.472888 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vkn7m_77273e11-9bf5-4480-bf99-266ae3f1ed7a/ovn-acl-logging/0.log" Nov 22 07:24:17 crc kubenswrapper[4929]: I1122 07:24:17.473302 
4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vkn7m_77273e11-9bf5-4480-bf99-266ae3f1ed7a/ovn-controller/0.log" Nov 22 07:24:17 crc kubenswrapper[4929]: I1122 07:24:17.473593 4929 generic.go:334] "Generic (PLEG): container finished" podID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerID="144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892" exitCode=0 Nov 22 07:24:17 crc kubenswrapper[4929]: I1122 07:24:17.473617 4929 generic.go:334] "Generic (PLEG): container finished" podID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerID="04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f" exitCode=0 Nov 22 07:24:17 crc kubenswrapper[4929]: I1122 07:24:17.473626 4929 generic.go:334] "Generic (PLEG): container finished" podID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerID="1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6" exitCode=0 Nov 22 07:24:17 crc kubenswrapper[4929]: I1122 07:24:17.473683 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerDied","Data":"144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892"} Nov 22 07:24:17 crc kubenswrapper[4929]: I1122 07:24:17.473708 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerDied","Data":"04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f"} Nov 22 07:24:17 crc kubenswrapper[4929]: I1122 07:24:17.473719 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerDied","Data":"1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6"} Nov 22 07:24:17 crc kubenswrapper[4929]: I1122 07:24:17.475538 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bb6rk_763619b4-b584-4089-bd56-96823e22e25e/kube-multus/1.log" Nov 22 07:24:17 crc kubenswrapper[4929]: I1122 07:24:17.476023 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bb6rk_763619b4-b584-4089-bd56-96823e22e25e/kube-multus/0.log" Nov 22 07:24:17 crc kubenswrapper[4929]: I1122 07:24:17.476049 4929 generic.go:334] "Generic (PLEG): container finished" podID="763619b4-b584-4089-bd56-96823e22e25e" containerID="a4024d76014e52596406e6b6941d0087be76f8bb3a2b7f771c22db099c914ff1" exitCode=2 Nov 22 07:24:17 crc kubenswrapper[4929]: I1122 07:24:17.476064 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bb6rk" event={"ID":"763619b4-b584-4089-bd56-96823e22e25e","Type":"ContainerDied","Data":"a4024d76014e52596406e6b6941d0087be76f8bb3a2b7f771c22db099c914ff1"} Nov 22 07:24:17 crc kubenswrapper[4929]: I1122 07:24:17.476084 4929 scope.go:117] "RemoveContainer" containerID="d3e7f7b5bfa3f2a5d2546f8821197521ba6e2126fee2659eef6be8f9a2a6beb8" Nov 22 07:24:17 crc kubenswrapper[4929]: I1122 07:24:17.476418 4929 scope.go:117] "RemoveContainer" containerID="a4024d76014e52596406e6b6941d0087be76f8bb3a2b7f771c22db099c914ff1" Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.607367 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892 is running failed: container process not 
found" containerID="144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.607382 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 465a576a7de5c1250d5df97ff9895da8db1174cb805c4416d9702732a59ccb82 is running failed: container process not found" containerID="465a576a7de5c1250d5df97ff9895da8db1174cb805c4416d9702732a59ccb82" cmd=["/bin/bash","-c","#!/bin/bash\ntest -f /etc/cni/net.d/10-ovn-kubernetes.conf\n"] Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.607708 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f is running failed: container process not found" containerID="04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.608157 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 465a576a7de5c1250d5df97ff9895da8db1174cb805c4416d9702732a59ccb82 is running failed: container process not found" containerID="465a576a7de5c1250d5df97ff9895da8db1174cb805c4416d9702732a59ccb82" cmd=["/bin/bash","-c","#!/bin/bash\ntest -f /etc/cni/net.d/10-ovn-kubernetes.conf\n"] Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.608179 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892 is running failed: container process not found" containerID="144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.608184 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f is running failed: container process not found" containerID="04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f" cmd=["/bin/bash","-c","set -xeo pipefail\n. 
/ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.608534 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 465a576a7de5c1250d5df97ff9895da8db1174cb805c4416d9702732a59ccb82 is running failed: container process not found" containerID="465a576a7de5c1250d5df97ff9895da8db1174cb805c4416d9702732a59ccb82" cmd=["/bin/bash","-c","#!/bin/bash\ntest -f /etc/cni/net.d/10-ovn-kubernetes.conf\n"] Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.608564 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 465a576a7de5c1250d5df97ff9895da8db1174cb805c4416d9702732a59ccb82 is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovnkube-controller" Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.608606 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f is running failed: container process not found" containerID="04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.608659 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="nbdb" Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.608615 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892 is running failed: container process not found" containerID="144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.608734 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892 is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="sbdb" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.831621 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vkn7m_77273e11-9bf5-4480-bf99-266ae3f1ed7a/ovn-acl-logging/0.log" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.832041 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vkn7m_77273e11-9bf5-4480-bf99-266ae3f1ed7a/ovn-controller/0.log" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.832416 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.887617 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-sxktf"] Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.887837 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="kube-rbac-proxy-node" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.887848 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="kube-rbac-proxy-node" Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.887856 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovnkube-controller" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.887864 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovnkube-controller" Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.887874 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="sbdb" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.887879 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="sbdb" Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.887888 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="nbdb" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.887894 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="nbdb" Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.887902 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovnkube-controller" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.887908 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovnkube-controller" Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.887915 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovnkube-controller" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.887921 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovnkube-controller" Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.887932 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="northd" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.887938 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="northd" Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.887944 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="kubecfg-setup" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.887949 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="kubecfg-setup" Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.887959 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" 
containerName="kube-rbac-proxy-ovn-metrics" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.887965 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="kube-rbac-proxy-ovn-metrics" Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.887974 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovn-controller" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.887980 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovn-controller" Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.887988 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovn-acl-logging" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.887993 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovn-acl-logging" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.888081 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="kube-rbac-proxy-ovn-metrics" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.888091 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="nbdb" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.888098 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovnkube-controller" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.888104 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovnkube-controller" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.888111 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovnkube-controller" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.888121 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovnkube-controller" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.888129 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovn-controller" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.888136 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="northd" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.888144 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="kube-rbac-proxy-node" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.888152 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovn-acl-logging" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.888160 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="sbdb" Nov 22 07:24:18 crc kubenswrapper[4929]: E1122 07:24:18.888367 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovnkube-controller" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.888376 4929 
state_mem.go:107] "Deleted CPUSet assignment" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" containerName="ovnkube-controller" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.890465 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.902590 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-run-systemd\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.902673 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-etc-openvswitch\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.902706 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-node-log\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.902735 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-cni-netd\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.902822 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j4rhl\" (UniqueName: \"kubernetes.io/projected/77273e11-9bf5-4480-bf99-266ae3f1ed7a-kube-api-access-j4rhl\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.902857 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-systemd-units\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.902876 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-node-log" (OuterVolumeSpecName: "node-log") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "node-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.902906 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-run-ovn-kubernetes\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.902928 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.902945 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/77273e11-9bf5-4480-bf99-266ae3f1ed7a-ovn-node-metrics-cert\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.902952 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.902981 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-slash\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903027 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/77273e11-9bf5-4480-bf99-266ae3f1ed7a-env-overrides\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903054 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-run-openvswitch\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903117 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-var-lib-cni-networks-ovn-kubernetes\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903151 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-var-lib-openvswitch\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc 
kubenswrapper[4929]: I1122 07:24:18.903192 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-log-socket\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903244 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-run-ovn\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903300 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-cni-bin\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903341 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/77273e11-9bf5-4480-bf99-266ae3f1ed7a-ovnkube-script-lib\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903372 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-run-netns\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903402 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/77273e11-9bf5-4480-bf99-266ae3f1ed7a-ovnkube-config\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903524 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-kubelet\") pod \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\" (UID: \"77273e11-9bf5-4480-bf99-266ae3f1ed7a\") " Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.902984 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903118 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-slash" (OuterVolumeSpecName: "host-slash") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903604 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77273e11-9bf5-4480-bf99-266ae3f1ed7a-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903680 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903726 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903736 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903738 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-run-systemd\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903761 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903764 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-log-socket" (OuterVolumeSpecName: "log-socket") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "log-socket". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903779 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-env-overrides\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903784 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903833 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-log-socket\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903869 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-run-ovn-kubernetes\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903873 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903887 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-ovnkube-config\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903873 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903905 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "host-run-netns". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.903935 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-run-ovn\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904007 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-slash\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904031 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-node-log\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904053 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-cni-netd\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904074 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77273e11-9bf5-4480-bf99-266ae3f1ed7a-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904128 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-systemd-units\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904149 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-etc-openvswitch\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904182 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-var-lib-openvswitch\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904265 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-run-netns\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904384 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-ovn-node-metrics-cert\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904427 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-cni-bin\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904460 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-run-openvswitch\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904517 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxzcq\" (UniqueName: \"kubernetes.io/projected/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-kube-api-access-cxzcq\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904633 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904683 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-ovnkube-script-lib\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904714 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-kubelet\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904819 4929 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904843 4929 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-node-log\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904860 4929 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904879 4929 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904897 4929 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904908 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77273e11-9bf5-4480-bf99-266ae3f1ed7a-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904914 4929 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-slash\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904953 4929 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/77273e11-9bf5-4480-bf99-266ae3f1ed7a-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904967 4929 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904982 4929 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.904997 4929 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.905012 4929 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-log-socket\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.905023 4929 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.905037 4929 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.905048 4929 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.905060 4929 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/77273e11-9bf5-4480-bf99-266ae3f1ed7a-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.905075 4929 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.908573 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77273e11-9bf5-4480-bf99-266ae3f1ed7a-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.909948 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77273e11-9bf5-4480-bf99-266ae3f1ed7a-kube-api-access-j4rhl" (OuterVolumeSpecName: "kube-api-access-j4rhl") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "kube-api-access-j4rhl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:24:18 crc kubenswrapper[4929]: I1122 07:24:18.920461 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "77273e11-9bf5-4480-bf99-266ae3f1ed7a" (UID: "77273e11-9bf5-4480-bf99-266ae3f1ed7a"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.006659 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-run-netns\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.006779 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-ovn-node-metrics-cert\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.006802 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-run-netns\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.006814 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-cni-bin\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.006882 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-run-openvswitch\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.006904 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxzcq\" (UniqueName: \"kubernetes.io/projected/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-kube-api-access-cxzcq\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.006940 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.006964 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-ovnkube-script-lib\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.006986 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-kubelet\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007066 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-run-systemd\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007101 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-env-overrides\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007124 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-log-socket\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007148 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-run-ovn-kubernetes\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007168 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-ovnkube-config\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007197 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-run-ovn\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007247 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-slash\") pod 
\"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007271 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-node-log\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007292 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-cni-netd\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007316 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-systemd-units\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007336 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-etc-openvswitch\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007359 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-var-lib-openvswitch\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007406 4929 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/77273e11-9bf5-4480-bf99-266ae3f1ed7a-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007422 4929 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/77273e11-9bf5-4480-bf99-266ae3f1ed7a-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007434 4929 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/77273e11-9bf5-4480-bf99-266ae3f1ed7a-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007446 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j4rhl\" (UniqueName: \"kubernetes.io/projected/77273e11-9bf5-4480-bf99-266ae3f1ed7a-kube-api-access-j4rhl\") on node \"crc\" DevicePath \"\"" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007477 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-var-lib-openvswitch\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc 
kubenswrapper[4929]: I1122 07:24:19.007501 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-run-openvswitch\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007608 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-cni-bin\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007685 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-node-log\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007694 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-run-ovn-kubernetes\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007740 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007876 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-cni-netd\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007912 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-systemd-units\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007938 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-etc-openvswitch\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007963 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-run-ovn\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007993 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-kubelet\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.007888 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-run-systemd\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.008098 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-log-socket\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.008440 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-ovnkube-config\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.008538 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-ovnkube-script-lib\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.008554 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-env-overrides\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.008612 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-host-slash\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.010825 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-ovn-node-metrics-cert\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.023122 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxzcq\" (UniqueName: \"kubernetes.io/projected/8e1f5b99-b2ca-4e9c-adaf-a622844e1696-kube-api-access-cxzcq\") pod \"ovnkube-node-sxktf\" (UID: \"8e1f5b99-b2ca-4e9c-adaf-a622844e1696\") " pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.210184 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.488752 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vkn7m_77273e11-9bf5-4480-bf99-266ae3f1ed7a/ovn-acl-logging/0.log" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.489259 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-vkn7m_77273e11-9bf5-4480-bf99-266ae3f1ed7a/ovn-controller/0.log" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.489585 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" event={"ID":"77273e11-9bf5-4480-bf99-266ae3f1ed7a","Type":"ContainerDied","Data":"97170fcf4b1a959dfcdd406ec04c18cb99631279bef967842a66051a4dff40e8"} Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.489669 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-vkn7m" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.525333 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-vkn7m"] Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.528185 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-vkn7m"] Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.863618 4929 scope.go:117] "RemoveContainer" containerID="465a576a7de5c1250d5df97ff9895da8db1174cb805c4416d9702732a59ccb82" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.899070 4929 scope.go:117] "RemoveContainer" containerID="144e4ee91a6fdccc58049d18eb6e660e84ef773859436e1c431b75c7e6578892" Nov 22 07:24:19 crc kubenswrapper[4929]: W1122 07:24:19.906148 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e1f5b99_b2ca_4e9c_adaf_a622844e1696.slice/crio-082a4135e0ade3642fa41124acf3c4593970ac30b890d054e4905a74cd30378d WatchSource:0}: Error finding container 082a4135e0ade3642fa41124acf3c4593970ac30b890d054e4905a74cd30378d: Status 404 returned error can't find the container with id 082a4135e0ade3642fa41124acf3c4593970ac30b890d054e4905a74cd30378d Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.956272 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77273e11-9bf5-4480-bf99-266ae3f1ed7a" path="/var/lib/kubelet/pods/77273e11-9bf5-4480-bf99-266ae3f1ed7a/volumes" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.962490 4929 scope.go:117] "RemoveContainer" containerID="04c0ca48da257c3c33260771f6308618355f86c7f19ea6875a19b8d1a169a58f" Nov 22 07:24:19 crc kubenswrapper[4929]: I1122 07:24:19.987770 4929 scope.go:117] "RemoveContainer" containerID="1d1cc9d42db58cb62a3463a4c29dbdb2dec067221983911a35e0d78fe36adba6" Nov 22 07:24:20 crc kubenswrapper[4929]: I1122 07:24:20.005074 4929 scope.go:117] "RemoveContainer" containerID="86555bcbf270d8b2b0bf8284f0698a04a8a950d852d8122f865091fc514310c2" Nov 22 07:24:20 crc kubenswrapper[4929]: I1122 07:24:20.022010 4929 scope.go:117] "RemoveContainer" containerID="25b6b32f5c87493866068116603e7433e74e8e8c77782ede20bf2ac6dfcc773c" Nov 22 07:24:20 crc kubenswrapper[4929]: I1122 07:24:20.036911 4929 scope.go:117] "RemoveContainer" containerID="146eb24644d8bc70a5ddea30dc79f2e08fc73c737c3f7a496b5b4c055b2e5c12" Nov 22 07:24:20 crc kubenswrapper[4929]: I1122 07:24:20.052559 4929 scope.go:117] "RemoveContainer" 
containerID="dda4febfe7e5ed49b3c0ad50e7ab85a3c2486d5c765e6dd4cdec0243ab5c3359" Nov 22 07:24:20 crc kubenswrapper[4929]: I1122 07:24:20.071512 4929 scope.go:117] "RemoveContainer" containerID="fcd1b6e2d40cf084d062d24bc7a86c3fd4b50684c9702986f7d6d162773972c2" Nov 22 07:24:20 crc kubenswrapper[4929]: I1122 07:24:20.499054 4929 generic.go:334] "Generic (PLEG): container finished" podID="8e1f5b99-b2ca-4e9c-adaf-a622844e1696" containerID="f9e1be38ead0bdd91c6a8dc59b2352180fc2df5b02e115f8ff46bd6ffbdab210" exitCode=0 Nov 22 07:24:20 crc kubenswrapper[4929]: I1122 07:24:20.499138 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" event={"ID":"8e1f5b99-b2ca-4e9c-adaf-a622844e1696","Type":"ContainerDied","Data":"f9e1be38ead0bdd91c6a8dc59b2352180fc2df5b02e115f8ff46bd6ffbdab210"} Nov 22 07:24:20 crc kubenswrapper[4929]: I1122 07:24:20.499182 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" event={"ID":"8e1f5b99-b2ca-4e9c-adaf-a622844e1696","Type":"ContainerStarted","Data":"082a4135e0ade3642fa41124acf3c4593970ac30b890d054e4905a74cd30378d"} Nov 22 07:24:20 crc kubenswrapper[4929]: I1122 07:24:20.503021 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-bb6rk_763619b4-b584-4089-bd56-96823e22e25e/kube-multus/1.log" Nov 22 07:24:20 crc kubenswrapper[4929]: I1122 07:24:20.503071 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-bb6rk" event={"ID":"763619b4-b584-4089-bd56-96823e22e25e","Type":"ContainerStarted","Data":"71c8eab2bb72a5b94583a62fb39162b0e197381efc1df47f9f0cfe06b5107af8"} Nov 22 07:24:21 crc kubenswrapper[4929]: I1122 07:24:21.509801 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" event={"ID":"8e1f5b99-b2ca-4e9c-adaf-a622844e1696","Type":"ContainerStarted","Data":"61ab185ee07a1a965c694986fa647daf61f5a6a841f4c59804e45205ffa1dfaf"} Nov 22 07:24:21 crc kubenswrapper[4929]: I1122 07:24:21.510181 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" event={"ID":"8e1f5b99-b2ca-4e9c-adaf-a622844e1696","Type":"ContainerStarted","Data":"24eff82ba8c8c58c2e165b49a8a6ff2220234600c3fbe3639cc70ba15da942ba"} Nov 22 07:24:21 crc kubenswrapper[4929]: I1122 07:24:21.510196 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" event={"ID":"8e1f5b99-b2ca-4e9c-adaf-a622844e1696","Type":"ContainerStarted","Data":"354a3815546b6015adebd05089cb7fc89e6226b9a9bd9645817b98cff2d78d2c"} Nov 22 07:24:22 crc kubenswrapper[4929]: I1122 07:24:22.516988 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" event={"ID":"8e1f5b99-b2ca-4e9c-adaf-a622844e1696","Type":"ContainerStarted","Data":"2d603718248f395d40071b1e09dad47462de9a95f4fbccc691f9eac21a674606"} Nov 22 07:24:22 crc kubenswrapper[4929]: I1122 07:24:22.517317 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" event={"ID":"8e1f5b99-b2ca-4e9c-adaf-a622844e1696","Type":"ContainerStarted","Data":"55bffe0b7c6a76c779f4362951bcf0f200cef6ace8909342abb802184bca3b7d"} Nov 22 07:24:22 crc kubenswrapper[4929]: I1122 07:24:22.517333 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" 
event={"ID":"8e1f5b99-b2ca-4e9c-adaf-a622844e1696","Type":"ContainerStarted","Data":"0021c4043ab4ea4b742504849b9fa8d158d686e06d5ec644b1fe849175748095"} Nov 22 07:24:25 crc kubenswrapper[4929]: I1122 07:24:25.535555 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" event={"ID":"8e1f5b99-b2ca-4e9c-adaf-a622844e1696","Type":"ContainerStarted","Data":"397f2a5fb238275b3b9676811f6b896913542138847740aa44ecfd623c1ec64b"} Nov 22 07:24:29 crc kubenswrapper[4929]: I1122 07:24:29.561823 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" event={"ID":"8e1f5b99-b2ca-4e9c-adaf-a622844e1696","Type":"ContainerStarted","Data":"cb032fbdb4fd2c279d880abb6827d5a7bcaaec9433ddda8cbbf341ef00ea94ee"} Nov 22 07:24:30 crc kubenswrapper[4929]: I1122 07:24:30.568694 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:30 crc kubenswrapper[4929]: I1122 07:24:30.569228 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:30 crc kubenswrapper[4929]: I1122 07:24:30.602504 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:30 crc kubenswrapper[4929]: I1122 07:24:30.633957 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" podStartSLOduration=12.633937616 podStartE2EDuration="12.633937616s" podCreationTimestamp="2025-11-22 07:24:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:24:30.603023272 +0000 UTC m=+807.712477305" watchObservedRunningTime="2025-11-22 07:24:30.633937616 +0000 UTC m=+807.743391629" Nov 22 07:24:31 crc kubenswrapper[4929]: I1122 07:24:31.575543 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:31 crc kubenswrapper[4929]: I1122 07:24:31.616699 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:24:34 crc kubenswrapper[4929]: I1122 07:24:34.592397 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-lfmhb" event={"ID":"89a720f9-04b9-4275-b820-c65476faa487","Type":"ContainerStarted","Data":"20ec751245cfe14d712a655fd103f3019fc24a459eafeae7ec57e02c355b43e3"} Nov 22 07:24:35 crc kubenswrapper[4929]: I1122 07:24:35.599859 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-4wpmv" event={"ID":"b471dac5-a291-4b5f-81fd-e17e4166a0fa","Type":"ContainerStarted","Data":"9a02482a989f3dea63d01509d0ec94370989c0cffc6b607ed2225c58ad7bc4a3"} Nov 22 07:24:35 crc kubenswrapper[4929]: I1122 07:24:35.616686 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-4wpmv" podStartSLOduration=2.5374271840000002 podStartE2EDuration="30.616670017s" podCreationTimestamp="2025-11-22 07:24:05 +0000 UTC" firstStartedPulling="2025-11-22 07:24:06.055865339 +0000 UTC m=+783.165319342" lastFinishedPulling="2025-11-22 07:24:34.135108122 +0000 UTC m=+811.244562175" observedRunningTime="2025-11-22 07:24:35.613739723 +0000 UTC m=+812.723193736" watchObservedRunningTime="2025-11-22 07:24:35.616670017 +0000 UTC 
m=+812.726124030" Nov 22 07:24:35 crc kubenswrapper[4929]: I1122 07:24:35.631870 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-lfmhb" podStartSLOduration=2.545605391 podStartE2EDuration="30.631855322s" podCreationTimestamp="2025-11-22 07:24:05 +0000 UTC" firstStartedPulling="2025-11-22 07:24:06.054618647 +0000 UTC m=+783.164072660" lastFinishedPulling="2025-11-22 07:24:34.140868538 +0000 UTC m=+811.250322591" observedRunningTime="2025-11-22 07:24:35.63097064 +0000 UTC m=+812.740424673" watchObservedRunningTime="2025-11-22 07:24:35.631855322 +0000 UTC m=+812.741309325" Nov 22 07:24:38 crc kubenswrapper[4929]: I1122 07:24:38.619299 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-b8d64" event={"ID":"255da6c7-3a6c-4cda-99f3-1d8b3ed48139","Type":"ContainerStarted","Data":"16d173eb33c0d8214f6008ff466778ae782a5eca88be2a52e9dbc9b0af726d0e"} Nov 22 07:24:38 crc kubenswrapper[4929]: I1122 07:24:38.619647 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-b8d64" Nov 22 07:24:38 crc kubenswrapper[4929]: I1122 07:24:38.640364 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-b8d64" podStartSLOduration=2.846917645 podStartE2EDuration="33.640341691s" podCreationTimestamp="2025-11-22 07:24:05 +0000 UTC" firstStartedPulling="2025-11-22 07:24:06.166953947 +0000 UTC m=+783.276407960" lastFinishedPulling="2025-11-22 07:24:36.960377983 +0000 UTC m=+814.069832006" observedRunningTime="2025-11-22 07:24:38.637823977 +0000 UTC m=+815.747278060" watchObservedRunningTime="2025-11-22 07:24:38.640341691 +0000 UTC m=+815.749795714" Nov 22 07:24:45 crc kubenswrapper[4929]: I1122 07:24:45.632709 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-b8d64" Nov 22 07:24:48 crc kubenswrapper[4929]: I1122 07:24:48.594279 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:24:48 crc kubenswrapper[4929]: I1122 07:24:48.594384 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:24:49 crc kubenswrapper[4929]: I1122 07:24:49.247981 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-sxktf" Nov 22 07:25:18 crc kubenswrapper[4929]: I1122 07:25:18.375694 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7"] Nov 22 07:25:18 crc kubenswrapper[4929]: I1122 07:25:18.377185 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" Nov 22 07:25:18 crc kubenswrapper[4929]: I1122 07:25:18.379364 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 22 07:25:18 crc kubenswrapper[4929]: I1122 07:25:18.392481 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7"] Nov 22 07:25:18 crc kubenswrapper[4929]: I1122 07:25:18.502377 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/de0e8764-3b4b-4ce2-83a4-27ae50c897e8-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7\" (UID: \"de0e8764-3b4b-4ce2-83a4-27ae50c897e8\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" Nov 22 07:25:18 crc kubenswrapper[4929]: I1122 07:25:18.502441 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dx7d\" (UniqueName: \"kubernetes.io/projected/de0e8764-3b4b-4ce2-83a4-27ae50c897e8-kube-api-access-4dx7d\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7\" (UID: \"de0e8764-3b4b-4ce2-83a4-27ae50c897e8\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" Nov 22 07:25:18 crc kubenswrapper[4929]: I1122 07:25:18.502526 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/de0e8764-3b4b-4ce2-83a4-27ae50c897e8-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7\" (UID: \"de0e8764-3b4b-4ce2-83a4-27ae50c897e8\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" Nov 22 07:25:18 crc kubenswrapper[4929]: I1122 07:25:18.595076 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:25:18 crc kubenswrapper[4929]: I1122 07:25:18.595189 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:25:18 crc kubenswrapper[4929]: I1122 07:25:18.604172 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/de0e8764-3b4b-4ce2-83a4-27ae50c897e8-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7\" (UID: \"de0e8764-3b4b-4ce2-83a4-27ae50c897e8\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" Nov 22 07:25:18 crc kubenswrapper[4929]: I1122 07:25:18.604338 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dx7d\" (UniqueName: \"kubernetes.io/projected/de0e8764-3b4b-4ce2-83a4-27ae50c897e8-kube-api-access-4dx7d\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7\" (UID: \"de0e8764-3b4b-4ce2-83a4-27ae50c897e8\") " 
pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" Nov 22 07:25:18 crc kubenswrapper[4929]: I1122 07:25:18.604461 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/de0e8764-3b4b-4ce2-83a4-27ae50c897e8-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7\" (UID: \"de0e8764-3b4b-4ce2-83a4-27ae50c897e8\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" Nov 22 07:25:18 crc kubenswrapper[4929]: I1122 07:25:18.605000 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/de0e8764-3b4b-4ce2-83a4-27ae50c897e8-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7\" (UID: \"de0e8764-3b4b-4ce2-83a4-27ae50c897e8\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" Nov 22 07:25:18 crc kubenswrapper[4929]: I1122 07:25:18.605297 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/de0e8764-3b4b-4ce2-83a4-27ae50c897e8-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7\" (UID: \"de0e8764-3b4b-4ce2-83a4-27ae50c897e8\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" Nov 22 07:25:18 crc kubenswrapper[4929]: I1122 07:25:18.646402 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dx7d\" (UniqueName: \"kubernetes.io/projected/de0e8764-3b4b-4ce2-83a4-27ae50c897e8-kube-api-access-4dx7d\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7\" (UID: \"de0e8764-3b4b-4ce2-83a4-27ae50c897e8\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" Nov 22 07:25:18 crc kubenswrapper[4929]: I1122 07:25:18.693554 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" Nov 22 07:25:19 crc kubenswrapper[4929]: I1122 07:25:19.192623 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7"] Nov 22 07:25:19 crc kubenswrapper[4929]: I1122 07:25:19.883461 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" event={"ID":"de0e8764-3b4b-4ce2-83a4-27ae50c897e8","Type":"ContainerStarted","Data":"be52a6ef5a17d741ca7f7bb716e211fc9f8e0451ebc1c5a3857103af44afed6d"} Nov 22 07:25:20 crc kubenswrapper[4929]: I1122 07:25:20.742743 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rrqrb"] Nov 22 07:25:20 crc kubenswrapper[4929]: I1122 07:25:20.743988 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rrqrb" Nov 22 07:25:20 crc kubenswrapper[4929]: I1122 07:25:20.762869 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rrqrb"] Nov 22 07:25:20 crc kubenswrapper[4929]: I1122 07:25:20.837186 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e3d6e00-6de1-4857-b7af-f41adca708ce-utilities\") pod \"redhat-operators-rrqrb\" (UID: \"8e3d6e00-6de1-4857-b7af-f41adca708ce\") " pod="openshift-marketplace/redhat-operators-rrqrb" Nov 22 07:25:20 crc kubenswrapper[4929]: I1122 07:25:20.837265 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e3d6e00-6de1-4857-b7af-f41adca708ce-catalog-content\") pod \"redhat-operators-rrqrb\" (UID: \"8e3d6e00-6de1-4857-b7af-f41adca708ce\") " pod="openshift-marketplace/redhat-operators-rrqrb" Nov 22 07:25:20 crc kubenswrapper[4929]: I1122 07:25:20.837367 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrwmb\" (UniqueName: \"kubernetes.io/projected/8e3d6e00-6de1-4857-b7af-f41adca708ce-kube-api-access-vrwmb\") pod \"redhat-operators-rrqrb\" (UID: \"8e3d6e00-6de1-4857-b7af-f41adca708ce\") " pod="openshift-marketplace/redhat-operators-rrqrb" Nov 22 07:25:20 crc kubenswrapper[4929]: I1122 07:25:20.893071 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" event={"ID":"de0e8764-3b4b-4ce2-83a4-27ae50c897e8","Type":"ContainerStarted","Data":"9b0424f823c758568e0de6e36595eb0520987b488a4569d387a0af69e9b7f349"} Nov 22 07:25:20 crc kubenswrapper[4929]: I1122 07:25:20.939371 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e3d6e00-6de1-4857-b7af-f41adca708ce-utilities\") pod \"redhat-operators-rrqrb\" (UID: \"8e3d6e00-6de1-4857-b7af-f41adca708ce\") " pod="openshift-marketplace/redhat-operators-rrqrb" Nov 22 07:25:20 crc kubenswrapper[4929]: I1122 07:25:20.939426 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e3d6e00-6de1-4857-b7af-f41adca708ce-catalog-content\") pod \"redhat-operators-rrqrb\" (UID: \"8e3d6e00-6de1-4857-b7af-f41adca708ce\") " pod="openshift-marketplace/redhat-operators-rrqrb" Nov 22 07:25:20 crc kubenswrapper[4929]: I1122 07:25:20.939449 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrwmb\" (UniqueName: \"kubernetes.io/projected/8e3d6e00-6de1-4857-b7af-f41adca708ce-kube-api-access-vrwmb\") pod \"redhat-operators-rrqrb\" (UID: \"8e3d6e00-6de1-4857-b7af-f41adca708ce\") " pod="openshift-marketplace/redhat-operators-rrqrb" Nov 22 07:25:20 crc kubenswrapper[4929]: I1122 07:25:20.940560 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e3d6e00-6de1-4857-b7af-f41adca708ce-catalog-content\") pod \"redhat-operators-rrqrb\" (UID: \"8e3d6e00-6de1-4857-b7af-f41adca708ce\") " pod="openshift-marketplace/redhat-operators-rrqrb" Nov 22 07:25:20 crc kubenswrapper[4929]: I1122 07:25:20.941105 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/8e3d6e00-6de1-4857-b7af-f41adca708ce-utilities\") pod \"redhat-operators-rrqrb\" (UID: \"8e3d6e00-6de1-4857-b7af-f41adca708ce\") " pod="openshift-marketplace/redhat-operators-rrqrb" Nov 22 07:25:20 crc kubenswrapper[4929]: I1122 07:25:20.972293 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrwmb\" (UniqueName: \"kubernetes.io/projected/8e3d6e00-6de1-4857-b7af-f41adca708ce-kube-api-access-vrwmb\") pod \"redhat-operators-rrqrb\" (UID: \"8e3d6e00-6de1-4857-b7af-f41adca708ce\") " pod="openshift-marketplace/redhat-operators-rrqrb" Nov 22 07:25:21 crc kubenswrapper[4929]: I1122 07:25:21.068557 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rrqrb" Nov 22 07:25:21 crc kubenswrapper[4929]: I1122 07:25:21.271901 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rrqrb"] Nov 22 07:25:21 crc kubenswrapper[4929]: I1122 07:25:21.898574 4929 generic.go:334] "Generic (PLEG): container finished" podID="de0e8764-3b4b-4ce2-83a4-27ae50c897e8" containerID="9b0424f823c758568e0de6e36595eb0520987b488a4569d387a0af69e9b7f349" exitCode=0 Nov 22 07:25:21 crc kubenswrapper[4929]: I1122 07:25:21.898629 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" event={"ID":"de0e8764-3b4b-4ce2-83a4-27ae50c897e8","Type":"ContainerDied","Data":"9b0424f823c758568e0de6e36595eb0520987b488a4569d387a0af69e9b7f349"} Nov 22 07:25:21 crc kubenswrapper[4929]: I1122 07:25:21.900393 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rrqrb" event={"ID":"8e3d6e00-6de1-4857-b7af-f41adca708ce","Type":"ContainerStarted","Data":"fe081a6e76372d0ae609bfd5de9f7fa5ab464d45af4a45b5365254e1489cc7f2"} Nov 22 07:25:21 crc kubenswrapper[4929]: I1122 07:25:21.900456 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rrqrb" event={"ID":"8e3d6e00-6de1-4857-b7af-f41adca708ce","Type":"ContainerStarted","Data":"3bf554875812699ceb305c467e194e2296edadf7bb648fdb01d8f7b7df52d952"} Nov 22 07:25:22 crc kubenswrapper[4929]: I1122 07:25:22.908455 4929 generic.go:334] "Generic (PLEG): container finished" podID="8e3d6e00-6de1-4857-b7af-f41adca708ce" containerID="fe081a6e76372d0ae609bfd5de9f7fa5ab464d45af4a45b5365254e1489cc7f2" exitCode=0 Nov 22 07:25:22 crc kubenswrapper[4929]: I1122 07:25:22.908525 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rrqrb" event={"ID":"8e3d6e00-6de1-4857-b7af-f41adca708ce","Type":"ContainerDied","Data":"fe081a6e76372d0ae609bfd5de9f7fa5ab464d45af4a45b5365254e1489cc7f2"} Nov 22 07:25:34 crc kubenswrapper[4929]: I1122 07:25:34.988623 4929 generic.go:334] "Generic (PLEG): container finished" podID="de0e8764-3b4b-4ce2-83a4-27ae50c897e8" containerID="7554a362939eeaf56b33f67397ab7309513c9560ea56d401e658f6a93ee6f905" exitCode=0 Nov 22 07:25:34 crc kubenswrapper[4929]: I1122 07:25:34.988715 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" event={"ID":"de0e8764-3b4b-4ce2-83a4-27ae50c897e8","Type":"ContainerDied","Data":"7554a362939eeaf56b33f67397ab7309513c9560ea56d401e658f6a93ee6f905"} Nov 22 07:25:34 crc kubenswrapper[4929]: I1122 07:25:34.992559 4929 generic.go:334] "Generic (PLEG): container finished" 
podID="8e3d6e00-6de1-4857-b7af-f41adca708ce" containerID="0192b4492723b654b2a17ba392e47e06083cd8ea16e71726dd1e3715750d79e3" exitCode=0 Nov 22 07:25:34 crc kubenswrapper[4929]: I1122 07:25:34.992608 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rrqrb" event={"ID":"8e3d6e00-6de1-4857-b7af-f41adca708ce","Type":"ContainerDied","Data":"0192b4492723b654b2a17ba392e47e06083cd8ea16e71726dd1e3715750d79e3"} Nov 22 07:25:36 crc kubenswrapper[4929]: I1122 07:25:36.001926 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" event={"ID":"de0e8764-3b4b-4ce2-83a4-27ae50c897e8","Type":"ContainerStarted","Data":"fd988b9f960eec170a8b7403b5c9a3bf58df356796f89b2a798572e0ebadf53f"} Nov 22 07:25:36 crc kubenswrapper[4929]: I1122 07:25:36.026153 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" podStartSLOduration=6.48075567 podStartE2EDuration="18.026126213s" podCreationTimestamp="2025-11-22 07:25:18 +0000 UTC" firstStartedPulling="2025-11-22 07:25:21.900067992 +0000 UTC m=+859.009522005" lastFinishedPulling="2025-11-22 07:25:33.445438495 +0000 UTC m=+870.554892548" observedRunningTime="2025-11-22 07:25:36.023300081 +0000 UTC m=+873.132754094" watchObservedRunningTime="2025-11-22 07:25:36.026126213 +0000 UTC m=+873.135580266" Nov 22 07:25:37 crc kubenswrapper[4929]: I1122 07:25:37.014729 4929 generic.go:334] "Generic (PLEG): container finished" podID="de0e8764-3b4b-4ce2-83a4-27ae50c897e8" containerID="fd988b9f960eec170a8b7403b5c9a3bf58df356796f89b2a798572e0ebadf53f" exitCode=0 Nov 22 07:25:37 crc kubenswrapper[4929]: I1122 07:25:37.014817 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" event={"ID":"de0e8764-3b4b-4ce2-83a4-27ae50c897e8","Type":"ContainerDied","Data":"fd988b9f960eec170a8b7403b5c9a3bf58df356796f89b2a798572e0ebadf53f"} Nov 22 07:25:38 crc kubenswrapper[4929]: I1122 07:25:38.319685 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" Nov 22 07:25:38 crc kubenswrapper[4929]: I1122 07:25:38.482305 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/de0e8764-3b4b-4ce2-83a4-27ae50c897e8-bundle\") pod \"de0e8764-3b4b-4ce2-83a4-27ae50c897e8\" (UID: \"de0e8764-3b4b-4ce2-83a4-27ae50c897e8\") " Nov 22 07:25:38 crc kubenswrapper[4929]: I1122 07:25:38.482435 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4dx7d\" (UniqueName: \"kubernetes.io/projected/de0e8764-3b4b-4ce2-83a4-27ae50c897e8-kube-api-access-4dx7d\") pod \"de0e8764-3b4b-4ce2-83a4-27ae50c897e8\" (UID: \"de0e8764-3b4b-4ce2-83a4-27ae50c897e8\") " Nov 22 07:25:38 crc kubenswrapper[4929]: I1122 07:25:38.482516 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/de0e8764-3b4b-4ce2-83a4-27ae50c897e8-util\") pod \"de0e8764-3b4b-4ce2-83a4-27ae50c897e8\" (UID: \"de0e8764-3b4b-4ce2-83a4-27ae50c897e8\") " Nov 22 07:25:38 crc kubenswrapper[4929]: I1122 07:25:38.485250 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de0e8764-3b4b-4ce2-83a4-27ae50c897e8-bundle" (OuterVolumeSpecName: "bundle") pod "de0e8764-3b4b-4ce2-83a4-27ae50c897e8" (UID: "de0e8764-3b4b-4ce2-83a4-27ae50c897e8"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:25:38 crc kubenswrapper[4929]: I1122 07:25:38.490548 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de0e8764-3b4b-4ce2-83a4-27ae50c897e8-kube-api-access-4dx7d" (OuterVolumeSpecName: "kube-api-access-4dx7d") pod "de0e8764-3b4b-4ce2-83a4-27ae50c897e8" (UID: "de0e8764-3b4b-4ce2-83a4-27ae50c897e8"). InnerVolumeSpecName "kube-api-access-4dx7d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:25:38 crc kubenswrapper[4929]: I1122 07:25:38.496399 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de0e8764-3b4b-4ce2-83a4-27ae50c897e8-util" (OuterVolumeSpecName: "util") pod "de0e8764-3b4b-4ce2-83a4-27ae50c897e8" (UID: "de0e8764-3b4b-4ce2-83a4-27ae50c897e8"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:25:38 crc kubenswrapper[4929]: I1122 07:25:38.584325 4929 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/de0e8764-3b4b-4ce2-83a4-27ae50c897e8-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:25:38 crc kubenswrapper[4929]: I1122 07:25:38.584389 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4dx7d\" (UniqueName: \"kubernetes.io/projected/de0e8764-3b4b-4ce2-83a4-27ae50c897e8-kube-api-access-4dx7d\") on node \"crc\" DevicePath \"\"" Nov 22 07:25:38 crc kubenswrapper[4929]: I1122 07:25:38.584407 4929 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/de0e8764-3b4b-4ce2-83a4-27ae50c897e8-util\") on node \"crc\" DevicePath \"\"" Nov 22 07:25:39 crc kubenswrapper[4929]: I1122 07:25:39.030411 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rrqrb" event={"ID":"8e3d6e00-6de1-4857-b7af-f41adca708ce","Type":"ContainerStarted","Data":"318a0ae02a2c58b2c24f6bc6eec3c32071d5144b484c9c9cdca85ba7dfbb2719"} Nov 22 07:25:39 crc kubenswrapper[4929]: I1122 07:25:39.032505 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7" event={"ID":"de0e8764-3b4b-4ce2-83a4-27ae50c897e8","Type":"ContainerDied","Data":"be52a6ef5a17d741ca7f7bb716e211fc9f8e0451ebc1c5a3857103af44afed6d"} Nov 22 07:25:39 crc kubenswrapper[4929]: I1122 07:25:39.032545 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="be52a6ef5a17d741ca7f7bb716e211fc9f8e0451ebc1c5a3857103af44afed6d" Nov 22 07:25:39 crc kubenswrapper[4929]: I1122 07:25:39.032599 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7"
Nov 22 07:25:39 crc kubenswrapper[4929]: I1122 07:25:39.057624 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rrqrb" podStartSLOduration=4.288631514 podStartE2EDuration="19.057599695s" podCreationTimestamp="2025-11-22 07:25:20 +0000 UTC" firstStartedPulling="2025-11-22 07:25:22.911115759 +0000 UTC m=+860.020569772" lastFinishedPulling="2025-11-22 07:25:37.6800839 +0000 UTC m=+874.789537953" observedRunningTime="2025-11-22 07:25:39.055338218 +0000 UTC m=+876.164792261" watchObservedRunningTime="2025-11-22 07:25:39.057599695 +0000 UTC m=+876.167053718"
Nov 22 07:25:41 crc kubenswrapper[4929]: I1122 07:25:41.068960 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rrqrb"
Nov 22 07:25:41 crc kubenswrapper[4929]: I1122 07:25:41.069582 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rrqrb"
Nov 22 07:25:42 crc kubenswrapper[4929]: I1122 07:25:42.107750 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rrqrb" podUID="8e3d6e00-6de1-4857-b7af-f41adca708ce" containerName="registry-server" probeResult="failure" output=<
Nov 22 07:25:42 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s
Nov 22 07:25:42 crc kubenswrapper[4929]: >
Nov 22 07:25:49 crc kubenswrapper[4929]: I1122 07:25:48.594928 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 07:25:49 crc kubenswrapper[4929]: I1122 07:25:48.595460 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 07:25:49 crc kubenswrapper[4929]: I1122 07:25:48.595501 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dssfx"
Nov 22 07:25:49 crc kubenswrapper[4929]: I1122 07:25:48.596032 4929 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"80896cec93a4f79a4cc130dc8d62e6989c7e25a8b824a1964efc7835e41ed527"} pod="openshift-machine-config-operator/machine-config-daemon-dssfx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 22 07:25:49 crc kubenswrapper[4929]: I1122 07:25:48.596081 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" containerID="cri-o://80896cec93a4f79a4cc130dc8d62e6989c7e25a8b824a1964efc7835e41ed527" gracePeriod=600
Nov 22 07:25:50 crc kubenswrapper[4929]: I1122 07:25:50.104943 4929 generic.go:334] "Generic (PLEG): container finished" podID="470531cb-120c-48d9-80e1-adf074cf3055" containerID="80896cec93a4f79a4cc130dc8d62e6989c7e25a8b824a1964efc7835e41ed527" exitCode=0
Nov 22 07:25:50 crc kubenswrapper[4929]: I1122 07:25:50.105005 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerDied","Data":"80896cec93a4f79a4cc130dc8d62e6989c7e25a8b824a1964efc7835e41ed527"}
Nov 22 07:25:50 crc kubenswrapper[4929]: I1122 07:25:50.105335 4929 scope.go:117] "RemoveContainer" containerID="1bfc549cc11cef76d5c77ecfb718d116f0f725033ed35b6092cba8d369cf2b6c"
Nov 22 07:25:51 crc kubenswrapper[4929]: I1122 07:25:51.112123 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"feb5786acfc814aec51c20213fe9cfe83614e95b20f84e104dde0c09f6c7dd83"}
Nov 22 07:25:51 crc kubenswrapper[4929]: I1122 07:25:51.115714 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rrqrb"
Nov 22 07:25:51 crc kubenswrapper[4929]: I1122 07:25:51.160673 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rrqrb"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.197054 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-88c4k"]
Nov 22 07:25:52 crc kubenswrapper[4929]: E1122 07:25:52.197652 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de0e8764-3b4b-4ce2-83a4-27ae50c897e8" containerName="util"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.197668 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="de0e8764-3b4b-4ce2-83a4-27ae50c897e8" containerName="util"
Nov 22 07:25:52 crc kubenswrapper[4929]: E1122 07:25:52.197684 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de0e8764-3b4b-4ce2-83a4-27ae50c897e8" containerName="pull"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.197691 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="de0e8764-3b4b-4ce2-83a4-27ae50c897e8" containerName="pull"
Nov 22 07:25:52 crc kubenswrapper[4929]: E1122 07:25:52.197711 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de0e8764-3b4b-4ce2-83a4-27ae50c897e8" containerName="extract"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.197719 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="de0e8764-3b4b-4ce2-83a4-27ae50c897e8" containerName="extract"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.197833 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="de0e8764-3b4b-4ce2-83a4-27ae50c897e8" containerName="extract"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.198356 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-88c4k"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.205915 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-l2q7c"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.206614 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.207153 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.210578 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-88c4k"]
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.297939 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6sl7x\" (UniqueName: \"kubernetes.io/projected/b8383ad3-1d58-4c89-ab4b-874351c249f3-kube-api-access-6sl7x\") pod \"obo-prometheus-operator-668cf9dfbb-88c4k\" (UID: \"b8383ad3-1d58-4c89-ab4b-874351c249f3\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-88c4k"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.332244 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p"]
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.333225 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.335140 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.335673 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-ttvmc"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.347759 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p"]
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.352650 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-htblg"]
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.353508 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-htblg"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.371616 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-htblg"]
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.399508 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6sl7x\" (UniqueName: \"kubernetes.io/projected/b8383ad3-1d58-4c89-ab4b-874351c249f3-kube-api-access-6sl7x\") pod \"obo-prometheus-operator-668cf9dfbb-88c4k\" (UID: \"b8383ad3-1d58-4c89-ab4b-874351c249f3\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-88c4k"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.421917 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6sl7x\" (UniqueName: \"kubernetes.io/projected/b8383ad3-1d58-4c89-ab4b-874351c249f3-kube-api-access-6sl7x\") pod \"obo-prometheus-operator-668cf9dfbb-88c4k\" (UID: \"b8383ad3-1d58-4c89-ab4b-874351c249f3\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-88c4k"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.457914 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-h45rh"]
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.458801 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-h45rh"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.461473 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.461794 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-gnf6c"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.474089 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-h45rh"]
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.500870 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/180a8947-9c9f-4870-bfec-07b7cb8c378a-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-864d7797b9-htblg\" (UID: \"180a8947-9c9f-4870-bfec-07b7cb8c378a\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-htblg"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.500977 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ddfa0602-ba6c-4d22-9842-a824a8b4a5b4-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p\" (UID: \"ddfa0602-ba6c-4d22-9842-a824a8b4a5b4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.501012 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/180a8947-9c9f-4870-bfec-07b7cb8c378a-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-864d7797b9-htblg\" (UID: \"180a8947-9c9f-4870-bfec-07b7cb8c378a\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-htblg"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.501038 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ddfa0602-ba6c-4d22-9842-a824a8b4a5b4-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p\" (UID: \"ddfa0602-ba6c-4d22-9842-a824a8b4a5b4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.514690 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-88c4k"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.602442 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ddfa0602-ba6c-4d22-9842-a824a8b4a5b4-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p\" (UID: \"ddfa0602-ba6c-4d22-9842-a824a8b4a5b4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.602490 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqtrr\" (UniqueName: \"kubernetes.io/projected/117a2144-28d4-4377-973e-3eea96a6a609-kube-api-access-cqtrr\") pod \"observability-operator-d8bb48f5d-h45rh\" (UID: \"117a2144-28d4-4377-973e-3eea96a6a609\") " pod="openshift-operators/observability-operator-d8bb48f5d-h45rh"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.602520 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/180a8947-9c9f-4870-bfec-07b7cb8c378a-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-864d7797b9-htblg\" (UID: \"180a8947-9c9f-4870-bfec-07b7cb8c378a\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-htblg"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.602544 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ddfa0602-ba6c-4d22-9842-a824a8b4a5b4-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p\" (UID: \"ddfa0602-ba6c-4d22-9842-a824a8b4a5b4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.602598 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/117a2144-28d4-4377-973e-3eea96a6a609-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-h45rh\" (UID: \"117a2144-28d4-4377-973e-3eea96a6a609\") " pod="openshift-operators/observability-operator-d8bb48f5d-h45rh"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.602628 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/180a8947-9c9f-4870-bfec-07b7cb8c378a-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-864d7797b9-htblg\" (UID: \"180a8947-9c9f-4870-bfec-07b7cb8c378a\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-htblg"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.607097 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/180a8947-9c9f-4870-bfec-07b7cb8c378a-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-864d7797b9-htblg\" (UID: \"180a8947-9c9f-4870-bfec-07b7cb8c378a\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-htblg"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.609657 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/180a8947-9c9f-4870-bfec-07b7cb8c378a-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-864d7797b9-htblg\" (UID: \"180a8947-9c9f-4870-bfec-07b7cb8c378a\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-htblg"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.615584 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ddfa0602-ba6c-4d22-9842-a824a8b4a5b4-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p\" (UID: \"ddfa0602-ba6c-4d22-9842-a824a8b4a5b4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.617786 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ddfa0602-ba6c-4d22-9842-a824a8b4a5b4-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p\" (UID: \"ddfa0602-ba6c-4d22-9842-a824a8b4a5b4\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.658714 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.665856 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-t5xzk"]
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.667529 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-t5xzk"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.673321 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-jfvf8"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.673756 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-t5xzk"]
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.673877 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-htblg"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.703271 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/117a2144-28d4-4377-973e-3eea96a6a609-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-h45rh\" (UID: \"117a2144-28d4-4377-973e-3eea96a6a609\") " pod="openshift-operators/observability-operator-d8bb48f5d-h45rh"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.703349 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqtrr\" (UniqueName: \"kubernetes.io/projected/117a2144-28d4-4377-973e-3eea96a6a609-kube-api-access-cqtrr\") pod \"observability-operator-d8bb48f5d-h45rh\" (UID: \"117a2144-28d4-4377-973e-3eea96a6a609\") " pod="openshift-operators/observability-operator-d8bb48f5d-h45rh"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.709825 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/117a2144-28d4-4377-973e-3eea96a6a609-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-h45rh\" (UID: \"117a2144-28d4-4377-973e-3eea96a6a609\") " pod="openshift-operators/observability-operator-d8bb48f5d-h45rh"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.724319 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqtrr\" (UniqueName: \"kubernetes.io/projected/117a2144-28d4-4377-973e-3eea96a6a609-kube-api-access-cqtrr\") pod \"observability-operator-d8bb48f5d-h45rh\" (UID: \"117a2144-28d4-4377-973e-3eea96a6a609\") " pod="openshift-operators/observability-operator-d8bb48f5d-h45rh"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.776831 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-h45rh"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.805140 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hn9r6\" (UniqueName: \"kubernetes.io/projected/1e90cb83-8f01-48a0-9d5e-9dbdacb9c9b8-kube-api-access-hn9r6\") pod \"perses-operator-5446b9c989-t5xzk\" (UID: \"1e90cb83-8f01-48a0-9d5e-9dbdacb9c9b8\") " pod="openshift-operators/perses-operator-5446b9c989-t5xzk"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.805199 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/1e90cb83-8f01-48a0-9d5e-9dbdacb9c9b8-openshift-service-ca\") pod \"perses-operator-5446b9c989-t5xzk\" (UID: \"1e90cb83-8f01-48a0-9d5e-9dbdacb9c9b8\") " pod="openshift-operators/perses-operator-5446b9c989-t5xzk"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.907788 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hn9r6\" (UniqueName: \"kubernetes.io/projected/1e90cb83-8f01-48a0-9d5e-9dbdacb9c9b8-kube-api-access-hn9r6\") pod \"perses-operator-5446b9c989-t5xzk\" (UID: \"1e90cb83-8f01-48a0-9d5e-9dbdacb9c9b8\") " pod="openshift-operators/perses-operator-5446b9c989-t5xzk"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.907838 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/1e90cb83-8f01-48a0-9d5e-9dbdacb9c9b8-openshift-service-ca\") pod \"perses-operator-5446b9c989-t5xzk\" (UID: \"1e90cb83-8f01-48a0-9d5e-9dbdacb9c9b8\") " pod="openshift-operators/perses-operator-5446b9c989-t5xzk"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.908759 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/1e90cb83-8f01-48a0-9d5e-9dbdacb9c9b8-openshift-service-ca\") pod \"perses-operator-5446b9c989-t5xzk\" (UID: \"1e90cb83-8f01-48a0-9d5e-9dbdacb9c9b8\") " pod="openshift-operators/perses-operator-5446b9c989-t5xzk"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.934885 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hn9r6\" (UniqueName: \"kubernetes.io/projected/1e90cb83-8f01-48a0-9d5e-9dbdacb9c9b8-kube-api-access-hn9r6\") pod \"perses-operator-5446b9c989-t5xzk\" (UID: \"1e90cb83-8f01-48a0-9d5e-9dbdacb9c9b8\") " pod="openshift-operators/perses-operator-5446b9c989-t5xzk"
Nov 22 07:25:52 crc kubenswrapper[4929]: I1122 07:25:52.938321 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p"]
Nov 22 07:25:53 crc kubenswrapper[4929]: I1122 07:25:53.002510 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-t5xzk"
Nov 22 07:25:53 crc kubenswrapper[4929]: I1122 07:25:53.010492 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-88c4k"]
Nov 22 07:25:53 crc kubenswrapper[4929]: W1122 07:25:53.018342 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb8383ad3_1d58_4c89_ab4b_874351c249f3.slice/crio-bf583b8038a7d031b55e8ca1ce28761028e4541483fa2781c7a4945ebbaae09f WatchSource:0}: Error finding container bf583b8038a7d031b55e8ca1ce28761028e4541483fa2781c7a4945ebbaae09f: Status 404 returned error can't find the container with id bf583b8038a7d031b55e8ca1ce28761028e4541483fa2781c7a4945ebbaae09f
Nov 22 07:25:53 crc kubenswrapper[4929]: I1122 07:25:53.073138 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-h45rh"]
Nov 22 07:25:53 crc kubenswrapper[4929]: W1122 07:25:53.090406 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod117a2144_28d4_4377_973e_3eea96a6a609.slice/crio-58d28d1258f9761d621c2924028920fb4c898e17c31e90b730446592a03e7d39 WatchSource:0}: Error finding container 58d28d1258f9761d621c2924028920fb4c898e17c31e90b730446592a03e7d39: Status 404 returned error can't find the container with id 58d28d1258f9761d621c2924028920fb4c898e17c31e90b730446592a03e7d39
Nov 22 07:25:53 crc kubenswrapper[4929]: I1122 07:25:53.129541 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p" event={"ID":"ddfa0602-ba6c-4d22-9842-a824a8b4a5b4","Type":"ContainerStarted","Data":"2e34d03b51590394f402aef48efce7da401daaddcc816703e41cc0446438a3f0"}
Nov 22 07:25:53 crc kubenswrapper[4929]: I1122 07:25:53.133591 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-h45rh" event={"ID":"117a2144-28d4-4377-973e-3eea96a6a609","Type":"ContainerStarted","Data":"58d28d1258f9761d621c2924028920fb4c898e17c31e90b730446592a03e7d39"}
Nov 22 07:25:53 crc kubenswrapper[4929]: I1122 07:25:53.139345 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-88c4k" event={"ID":"b8383ad3-1d58-4c89-ab4b-874351c249f3","Type":"ContainerStarted","Data":"bf583b8038a7d031b55e8ca1ce28761028e4541483fa2781c7a4945ebbaae09f"}
Nov 22 07:25:53 crc kubenswrapper[4929]: I1122 07:25:53.194486 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-htblg"]
Nov 22 07:25:53 crc kubenswrapper[4929]: W1122 07:25:53.244868 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod180a8947_9c9f_4870_bfec_07b7cb8c378a.slice/crio-d2569448f99927c4385195ff0234c50f5052591ffba0582ee903310823a48915 WatchSource:0}: Error finding container d2569448f99927c4385195ff0234c50f5052591ffba0582ee903310823a48915: Status 404 returned error can't find the container with id d2569448f99927c4385195ff0234c50f5052591ffba0582ee903310823a48915
Nov 22 07:25:53 crc kubenswrapper[4929]: I1122 07:25:53.309775 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-t5xzk"]
Nov 22 07:25:53 crc kubenswrapper[4929]: W1122 07:25:53.318139 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1e90cb83_8f01_48a0_9d5e_9dbdacb9c9b8.slice/crio-2e37326a822f1f52f6fb2dd6991d81877e57e0b582a7846d90043546b3307274 WatchSource:0}: Error finding container 2e37326a822f1f52f6fb2dd6991d81877e57e0b582a7846d90043546b3307274: Status 404 returned error can't find the container with id 2e37326a822f1f52f6fb2dd6991d81877e57e0b582a7846d90043546b3307274
Nov 22 07:25:53 crc kubenswrapper[4929]: I1122 07:25:53.465079 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rrqrb"]
Nov 22 07:25:53 crc kubenswrapper[4929]: I1122 07:25:53.465356 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rrqrb" podUID="8e3d6e00-6de1-4857-b7af-f41adca708ce" containerName="registry-server" containerID="cri-o://318a0ae02a2c58b2c24f6bc6eec3c32071d5144b484c9c9cdca85ba7dfbb2719" gracePeriod=2
Nov 22 07:25:53 crc kubenswrapper[4929]: I1122 07:25:53.852546 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rrqrb"
Nov 22 07:25:53 crc kubenswrapper[4929]: I1122 07:25:53.961636 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e3d6e00-6de1-4857-b7af-f41adca708ce-catalog-content\") pod \"8e3d6e00-6de1-4857-b7af-f41adca708ce\" (UID: \"8e3d6e00-6de1-4857-b7af-f41adca708ce\") "
Nov 22 07:25:53 crc kubenswrapper[4929]: I1122 07:25:53.961730 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vrwmb\" (UniqueName: \"kubernetes.io/projected/8e3d6e00-6de1-4857-b7af-f41adca708ce-kube-api-access-vrwmb\") pod \"8e3d6e00-6de1-4857-b7af-f41adca708ce\" (UID: \"8e3d6e00-6de1-4857-b7af-f41adca708ce\") "
Nov 22 07:25:53 crc kubenswrapper[4929]: I1122 07:25:53.961795 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e3d6e00-6de1-4857-b7af-f41adca708ce-utilities\") pod \"8e3d6e00-6de1-4857-b7af-f41adca708ce\" (UID: \"8e3d6e00-6de1-4857-b7af-f41adca708ce\") "
Nov 22 07:25:53 crc kubenswrapper[4929]: I1122 07:25:53.963173 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e3d6e00-6de1-4857-b7af-f41adca708ce-utilities" (OuterVolumeSpecName: "utilities") pod "8e3d6e00-6de1-4857-b7af-f41adca708ce" (UID: "8e3d6e00-6de1-4857-b7af-f41adca708ce"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:25:53 crc kubenswrapper[4929]: I1122 07:25:53.967791 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e3d6e00-6de1-4857-b7af-f41adca708ce-kube-api-access-vrwmb" (OuterVolumeSpecName: "kube-api-access-vrwmb") pod "8e3d6e00-6de1-4857-b7af-f41adca708ce" (UID: "8e3d6e00-6de1-4857-b7af-f41adca708ce"). InnerVolumeSpecName "kube-api-access-vrwmb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.063028 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrwmb\" (UniqueName: \"kubernetes.io/projected/8e3d6e00-6de1-4857-b7af-f41adca708ce-kube-api-access-vrwmb\") on node \"crc\" DevicePath \"\""
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.063053 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e3d6e00-6de1-4857-b7af-f41adca708ce-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.107819 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e3d6e00-6de1-4857-b7af-f41adca708ce-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8e3d6e00-6de1-4857-b7af-f41adca708ce" (UID: "8e3d6e00-6de1-4857-b7af-f41adca708ce"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.151943 4929 generic.go:334] "Generic (PLEG): container finished" podID="8e3d6e00-6de1-4857-b7af-f41adca708ce" containerID="318a0ae02a2c58b2c24f6bc6eec3c32071d5144b484c9c9cdca85ba7dfbb2719" exitCode=0
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.151985 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rrqrb"
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.152026 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rrqrb" event={"ID":"8e3d6e00-6de1-4857-b7af-f41adca708ce","Type":"ContainerDied","Data":"318a0ae02a2c58b2c24f6bc6eec3c32071d5144b484c9c9cdca85ba7dfbb2719"}
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.152057 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rrqrb" event={"ID":"8e3d6e00-6de1-4857-b7af-f41adca708ce","Type":"ContainerDied","Data":"3bf554875812699ceb305c467e194e2296edadf7bb648fdb01d8f7b7df52d952"}
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.152080 4929 scope.go:117] "RemoveContainer" containerID="318a0ae02a2c58b2c24f6bc6eec3c32071d5144b484c9c9cdca85ba7dfbb2719"
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.154045 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-htblg" event={"ID":"180a8947-9c9f-4870-bfec-07b7cb8c378a","Type":"ContainerStarted","Data":"d2569448f99927c4385195ff0234c50f5052591ffba0582ee903310823a48915"}
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.155389 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-t5xzk" event={"ID":"1e90cb83-8f01-48a0-9d5e-9dbdacb9c9b8","Type":"ContainerStarted","Data":"2e37326a822f1f52f6fb2dd6991d81877e57e0b582a7846d90043546b3307274"}
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.164658 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e3d6e00-6de1-4857-b7af-f41adca708ce-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.171435 4929 scope.go:117] "RemoveContainer" containerID="0192b4492723b654b2a17ba392e47e06083cd8ea16e71726dd1e3715750d79e3"
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.185653 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rrqrb"]
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.187975 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rrqrb"]
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.220534 4929 scope.go:117] "RemoveContainer" containerID="fe081a6e76372d0ae609bfd5de9f7fa5ab464d45af4a45b5365254e1489cc7f2"
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.236528 4929 scope.go:117] "RemoveContainer" containerID="318a0ae02a2c58b2c24f6bc6eec3c32071d5144b484c9c9cdca85ba7dfbb2719"
Nov 22 07:25:54 crc kubenswrapper[4929]: E1122 07:25:54.237120 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"318a0ae02a2c58b2c24f6bc6eec3c32071d5144b484c9c9cdca85ba7dfbb2719\": container with ID starting with 318a0ae02a2c58b2c24f6bc6eec3c32071d5144b484c9c9cdca85ba7dfbb2719 not found: ID does not exist" containerID="318a0ae02a2c58b2c24f6bc6eec3c32071d5144b484c9c9cdca85ba7dfbb2719"
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.237320 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"318a0ae02a2c58b2c24f6bc6eec3c32071d5144b484c9c9cdca85ba7dfbb2719"} err="failed to get container status \"318a0ae02a2c58b2c24f6bc6eec3c32071d5144b484c9c9cdca85ba7dfbb2719\": rpc error: code = NotFound desc = could not find container \"318a0ae02a2c58b2c24f6bc6eec3c32071d5144b484c9c9cdca85ba7dfbb2719\": container with ID starting with 318a0ae02a2c58b2c24f6bc6eec3c32071d5144b484c9c9cdca85ba7dfbb2719 not found: ID does not exist"
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.237360 4929 scope.go:117] "RemoveContainer" containerID="0192b4492723b654b2a17ba392e47e06083cd8ea16e71726dd1e3715750d79e3"
Nov 22 07:25:54 crc kubenswrapper[4929]: E1122 07:25:54.238020 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0192b4492723b654b2a17ba392e47e06083cd8ea16e71726dd1e3715750d79e3\": container with ID starting with 0192b4492723b654b2a17ba392e47e06083cd8ea16e71726dd1e3715750d79e3 not found: ID does not exist" containerID="0192b4492723b654b2a17ba392e47e06083cd8ea16e71726dd1e3715750d79e3"
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.238054 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0192b4492723b654b2a17ba392e47e06083cd8ea16e71726dd1e3715750d79e3"} err="failed to get container status \"0192b4492723b654b2a17ba392e47e06083cd8ea16e71726dd1e3715750d79e3\": rpc error: code = NotFound desc = could not find container \"0192b4492723b654b2a17ba392e47e06083cd8ea16e71726dd1e3715750d79e3\": container with ID starting with 0192b4492723b654b2a17ba392e47e06083cd8ea16e71726dd1e3715750d79e3 not found: ID does not exist"
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.238075 4929 scope.go:117] "RemoveContainer" containerID="fe081a6e76372d0ae609bfd5de9f7fa5ab464d45af4a45b5365254e1489cc7f2"
Nov 22 07:25:54 crc kubenswrapper[4929]: E1122 07:25:54.238318 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe081a6e76372d0ae609bfd5de9f7fa5ab464d45af4a45b5365254e1489cc7f2\": container with ID starting with fe081a6e76372d0ae609bfd5de9f7fa5ab464d45af4a45b5365254e1489cc7f2 not found: ID does not exist" containerID="fe081a6e76372d0ae609bfd5de9f7fa5ab464d45af4a45b5365254e1489cc7f2"
Nov 22 07:25:54 crc kubenswrapper[4929]: I1122 07:25:54.238347 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe081a6e76372d0ae609bfd5de9f7fa5ab464d45af4a45b5365254e1489cc7f2"} err="failed to get container status \"fe081a6e76372d0ae609bfd5de9f7fa5ab464d45af4a45b5365254e1489cc7f2\": rpc error: code = NotFound desc = could not find container \"fe081a6e76372d0ae609bfd5de9f7fa5ab464d45af4a45b5365254e1489cc7f2\": container with ID starting with fe081a6e76372d0ae609bfd5de9f7fa5ab464d45af4a45b5365254e1489cc7f2 not found: ID does not exist"
Nov 22 07:25:55 crc kubenswrapper[4929]: I1122 07:25:55.964523 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e3d6e00-6de1-4857-b7af-f41adca708ce" path="/var/lib/kubelet/pods/8e3d6e00-6de1-4857-b7af-f41adca708ce/volumes"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.276912 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-28wht"]
Nov 22 07:25:59 crc kubenswrapper[4929]: E1122 07:25:59.277478 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e3d6e00-6de1-4857-b7af-f41adca708ce" containerName="registry-server"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.277494 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e3d6e00-6de1-4857-b7af-f41adca708ce" containerName="registry-server"
Nov 22 07:25:59 crc kubenswrapper[4929]: E1122 07:25:59.277511 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e3d6e00-6de1-4857-b7af-f41adca708ce" containerName="extract-utilities"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.277519 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e3d6e00-6de1-4857-b7af-f41adca708ce" containerName="extract-utilities"
Nov 22 07:25:59 crc kubenswrapper[4929]: E1122 07:25:59.277532 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e3d6e00-6de1-4857-b7af-f41adca708ce" containerName="extract-content"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.277542 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e3d6e00-6de1-4857-b7af-f41adca708ce" containerName="extract-content"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.277664 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e3d6e00-6de1-4857-b7af-f41adca708ce" containerName="registry-server"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.278651 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-28wht"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.289798 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-28wht"]
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.363533 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388-catalog-content\") pod \"certified-operators-28wht\" (UID: \"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388\") " pod="openshift-marketplace/certified-operators-28wht"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.364518 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388-utilities\") pod \"certified-operators-28wht\" (UID: \"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388\") " pod="openshift-marketplace/certified-operators-28wht"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.364736 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pscc\" (UniqueName: \"kubernetes.io/projected/f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388-kube-api-access-6pscc\") pod \"certified-operators-28wht\" (UID: \"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388\") " pod="openshift-marketplace/certified-operators-28wht"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.465809 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pscc\" (UniqueName: \"kubernetes.io/projected/f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388-kube-api-access-6pscc\") pod \"certified-operators-28wht\" (UID: \"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388\") " pod="openshift-marketplace/certified-operators-28wht"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.465915 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388-catalog-content\") pod \"certified-operators-28wht\" (UID: \"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388\") " pod="openshift-marketplace/certified-operators-28wht"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.465951 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388-utilities\") pod \"certified-operators-28wht\" (UID: \"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388\") " pod="openshift-marketplace/certified-operators-28wht"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.466574 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388-utilities\") pod \"certified-operators-28wht\" (UID: \"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388\") " pod="openshift-marketplace/certified-operators-28wht"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.467076 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388-catalog-content\") pod \"certified-operators-28wht\" (UID: \"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388\") " pod="openshift-marketplace/certified-operators-28wht"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.474616 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xld4b"]
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.475882 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xld4b"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.494065 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6pscc\" (UniqueName: \"kubernetes.io/projected/f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388-kube-api-access-6pscc\") pod \"certified-operators-28wht\" (UID: \"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388\") " pod="openshift-marketplace/certified-operators-28wht"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.494470 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xld4b"]
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.567181 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be1b287a-52ec-4cc1-8803-2f017a9518a4-catalog-content\") pod \"community-operators-xld4b\" (UID: \"be1b287a-52ec-4cc1-8803-2f017a9518a4\") " pod="openshift-marketplace/community-operators-xld4b"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.567266 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcvdh\" (UniqueName: \"kubernetes.io/projected/be1b287a-52ec-4cc1-8803-2f017a9518a4-kube-api-access-zcvdh\") pod \"community-operators-xld4b\" (UID: \"be1b287a-52ec-4cc1-8803-2f017a9518a4\") " pod="openshift-marketplace/community-operators-xld4b"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.567300 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be1b287a-52ec-4cc1-8803-2f017a9518a4-utilities\") pod \"community-operators-xld4b\" (UID: \"be1b287a-52ec-4cc1-8803-2f017a9518a4\") " pod="openshift-marketplace/community-operators-xld4b"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.599744 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-28wht"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.668064 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be1b287a-52ec-4cc1-8803-2f017a9518a4-catalog-content\") pod \"community-operators-xld4b\" (UID: \"be1b287a-52ec-4cc1-8803-2f017a9518a4\") " pod="openshift-marketplace/community-operators-xld4b"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.668123 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcvdh\" (UniqueName: \"kubernetes.io/projected/be1b287a-52ec-4cc1-8803-2f017a9518a4-kube-api-access-zcvdh\") pod \"community-operators-xld4b\" (UID: \"be1b287a-52ec-4cc1-8803-2f017a9518a4\") " pod="openshift-marketplace/community-operators-xld4b"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.668149 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be1b287a-52ec-4cc1-8803-2f017a9518a4-utilities\") pod \"community-operators-xld4b\" (UID: \"be1b287a-52ec-4cc1-8803-2f017a9518a4\") " pod="openshift-marketplace/community-operators-xld4b"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.668765 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be1b287a-52ec-4cc1-8803-2f017a9518a4-utilities\") pod \"community-operators-xld4b\" (UID: \"be1b287a-52ec-4cc1-8803-2f017a9518a4\") " pod="openshift-marketplace/community-operators-xld4b"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.668963 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be1b287a-52ec-4cc1-8803-2f017a9518a4-catalog-content\") pod \"community-operators-xld4b\" (UID: \"be1b287a-52ec-4cc1-8803-2f017a9518a4\") " pod="openshift-marketplace/community-operators-xld4b"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.688951 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcvdh\" (UniqueName: \"kubernetes.io/projected/be1b287a-52ec-4cc1-8803-2f017a9518a4-kube-api-access-zcvdh\") pod \"community-operators-xld4b\" (UID: \"be1b287a-52ec-4cc1-8803-2f017a9518a4\") " pod="openshift-marketplace/community-operators-xld4b"
Nov 22 07:25:59 crc kubenswrapper[4929]: I1122 07:25:59.832739 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xld4b"
Nov 22 07:26:01 crc kubenswrapper[4929]: I1122 07:26:01.075132 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ntsbw"]
Nov 22 07:26:01 crc kubenswrapper[4929]: I1122 07:26:01.076558 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ntsbw"
Nov 22 07:26:01 crc kubenswrapper[4929]: I1122 07:26:01.088172 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ntsbw"]
Nov 22 07:26:01 crc kubenswrapper[4929]: I1122 07:26:01.190758 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ds2vw\" (UniqueName: \"kubernetes.io/projected/c8419d21-4df2-48cf-8295-d579e1131f64-kube-api-access-ds2vw\") pod \"redhat-marketplace-ntsbw\" (UID: \"c8419d21-4df2-48cf-8295-d579e1131f64\") " pod="openshift-marketplace/redhat-marketplace-ntsbw"
Nov 22 07:26:01 crc kubenswrapper[4929]: I1122 07:26:01.190810 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8419d21-4df2-48cf-8295-d579e1131f64-catalog-content\") pod \"redhat-marketplace-ntsbw\" (UID: \"c8419d21-4df2-48cf-8295-d579e1131f64\") " pod="openshift-marketplace/redhat-marketplace-ntsbw"
Nov 22 07:26:01 crc kubenswrapper[4929]: I1122 07:26:01.190970 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8419d21-4df2-48cf-8295-d579e1131f64-utilities\") pod \"redhat-marketplace-ntsbw\" (UID: \"c8419d21-4df2-48cf-8295-d579e1131f64\") " pod="openshift-marketplace/redhat-marketplace-ntsbw"
Nov 22 07:26:01 crc kubenswrapper[4929]: I1122 07:26:01.292227 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ds2vw\" (UniqueName: \"kubernetes.io/projected/c8419d21-4df2-48cf-8295-d579e1131f64-kube-api-access-ds2vw\") pod \"redhat-marketplace-ntsbw\" (UID: \"c8419d21-4df2-48cf-8295-d579e1131f64\") " pod="openshift-marketplace/redhat-marketplace-ntsbw"
Nov 22 07:26:01 crc kubenswrapper[4929]: I1122 07:26:01.292272 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8419d21-4df2-48cf-8295-d579e1131f64-catalog-content\") pod \"redhat-marketplace-ntsbw\" (UID: \"c8419d21-4df2-48cf-8295-d579e1131f64\") " pod="openshift-marketplace/redhat-marketplace-ntsbw"
Nov 22 07:26:01 crc kubenswrapper[4929]: I1122 07:26:01.292316 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8419d21-4df2-48cf-8295-d579e1131f64-utilities\") pod \"redhat-marketplace-ntsbw\" (UID: \"c8419d21-4df2-48cf-8295-d579e1131f64\") " pod="openshift-marketplace/redhat-marketplace-ntsbw"
Nov 22 07:26:01 crc kubenswrapper[4929]: I1122 07:26:01.292761 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8419d21-4df2-48cf-8295-d579e1131f64-utilities\") pod \"redhat-marketplace-ntsbw\" (UID: \"c8419d21-4df2-48cf-8295-d579e1131f64\") " pod="openshift-marketplace/redhat-marketplace-ntsbw"
Nov 22 07:26:01 crc kubenswrapper[4929]: I1122 07:26:01.292967 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8419d21-4df2-48cf-8295-d579e1131f64-catalog-content\") pod \"redhat-marketplace-ntsbw\" (UID: \"c8419d21-4df2-48cf-8295-d579e1131f64\") " pod="openshift-marketplace/redhat-marketplace-ntsbw"
Nov 22 07:26:01 crc kubenswrapper[4929]: I1122 07:26:01.313339 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ds2vw\" (UniqueName: \"kubernetes.io/projected/c8419d21-4df2-48cf-8295-d579e1131f64-kube-api-access-ds2vw\") pod \"redhat-marketplace-ntsbw\" (UID: \"c8419d21-4df2-48cf-8295-d579e1131f64\") " pod="openshift-marketplace/redhat-marketplace-ntsbw"
Nov 22 07:26:01 crc kubenswrapper[4929]: I1122 07:26:01.393455 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ntsbw"
Nov 22 07:26:06 crc kubenswrapper[4929]: E1122 07:26:06.618094 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:43d33f0125e6b990f4a972ac4e952a065d7e72dc1690c6c836963b7341734aec"
Nov 22 07:26:06 crc kubenswrapper[4929]: E1122 07:26:06.618329 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:prometheus-operator-admission-webhook,Image:registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:43d33f0125e6b990f4a972ac4e952a065d7e72dc1690c6c836963b7341734aec,Command:[],Args:[--web.enable-tls=true --web.cert-file=/tmp/k8s-webhook-server/serving-certs/tls.crt --web.key-file=/tmp/k8s-webhook-server/serving-certs/tls.key],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.3.0,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{209715200 0} {} BinarySI},},Requests:ResourceList{cpu: {{50 -3} {} 50m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:apiservice-cert,ReadOnly:false,MountPath:/apiserver.local.config/certificates,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p_openshift-operators(ddfa0602-ba6c-4d22-9842-a824a8b4a5b4): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 22 07:26:06 crc kubenswrapper[4929]: E1122 07:26:06.619683 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator-admission-webhook\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p" podUID="ddfa0602-ba6c-4d22-9842-a824a8b4a5b4"
Nov 22 07:26:07 crc kubenswrapper[4929]: E1122 07:26:07.239748 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator-admission-webhook\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:43d33f0125e6b990f4a972ac4e952a065d7e72dc1690c6c836963b7341734aec\\\"\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p" podUID="ddfa0602-ba6c-4d22-9842-a824a8b4a5b4"
Nov 22 07:26:11 crc kubenswrapper[4929]: I1122 07:26:11.261914 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-htblg" event={"ID":"180a8947-9c9f-4870-bfec-07b7cb8c378a","Type":"ContainerStarted","Data":"4a43e79ca3ce556d0f480064cf401e0378cb780cbacea0426a3b8501324e9575"}
Nov 22 07:26:11 crc kubenswrapper[4929]: I1122 07:26:11.294570 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-htblg" podStartSLOduration=1.527233892 podStartE2EDuration="19.294544335s" podCreationTimestamp="2025-11-22 07:25:52 +0000 UTC" firstStartedPulling="2025-11-22 07:25:53.248761278 +0000 UTC m=+890.358215291" lastFinishedPulling="2025-11-22 07:26:11.016071721 +0000 UTC m=+908.125525734" observedRunningTime="2025-11-22 07:26:11.286942432 +0000 UTC m=+908.396396445" watchObservedRunningTime="2025-11-22 07:26:11.294544335 +0000 UTC m=+908.403998358"
Nov 22 07:26:11 crc kubenswrapper[4929]: I1122 07:26:11.301679 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ntsbw"]
Nov 22 07:26:11 crc kubenswrapper[4929]: I1122 07:26:11.360235 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xld4b"]
Nov 22 07:26:11 crc kubenswrapper[4929]: I1122 07:26:11.421541 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-28wht"]
Nov 22 07:26:12 crc kubenswrapper[4929]: I1122 07:26:12.269841 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-h45rh" event={"ID":"117a2144-28d4-4377-973e-3eea96a6a609","Type":"ContainerStarted","Data":"0f47ef7f338e5f4d4b332c3d535fd2d4f6d2c3f8d38123121577574a2ed6d37f"}
Nov 22 07:26:12 crc kubenswrapper[4929]: I1122 07:26:12.271948 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-h45rh"
Nov 22 07:26:12 crc kubenswrapper[4929]: I1122 07:26:12.273704 4929 generic.go:334] "Generic (PLEG): container finished" podID="be1b287a-52ec-4cc1-8803-2f017a9518a4" containerID="531a54c832c7031f8ab7b824eff94bc54d7a20f755001918e239cf4e81d451e7" exitCode=0
Nov 22 07:26:12 crc kubenswrapper[4929]: I1122 07:26:12.273761 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xld4b" event={"ID":"be1b287a-52ec-4cc1-8803-2f017a9518a4","Type":"ContainerDied","Data":"531a54c832c7031f8ab7b824eff94bc54d7a20f755001918e239cf4e81d451e7"}
Nov 22 07:26:12 crc kubenswrapper[4929]: I1122 07:26:12.273779 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xld4b" event={"ID":"be1b287a-52ec-4cc1-8803-2f017a9518a4","Type":"ContainerStarted","Data":"49df9550757fdb9b951d56e150c97342dda001b2d1345aa90d8055b34d94a22d"}
Nov 22 07:26:12 crc kubenswrapper[4929]: I1122 07:26:12.274395 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-h45rh"
Nov 22 07:26:12 crc kubenswrapper[4929]: I1122 07:26:12.276756 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-88c4k" event={"ID":"b8383ad3-1d58-4c89-ab4b-874351c249f3","Type":"ContainerStarted","Data":"497eea3cd5aabf70bc3081aff27667ad32db2ff4f5d33fcdf1bad4ee9e6757f7"}
Nov 22 07:26:12 crc kubenswrapper[4929]: I1122 07:26:12.278507 4929 generic.go:334] "Generic (PLEG): container finished" podID="c8419d21-4df2-48cf-8295-d579e1131f64" containerID="928eb8fd4a65d2fa4632e6fb0cc82b47b5b7766864fcb7363c5f216f3854ab6c" exitCode=0
Nov 22 07:26:12 crc kubenswrapper[4929]: I1122 07:26:12.278562 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ntsbw" event={"ID":"c8419d21-4df2-48cf-8295-d579e1131f64","Type":"ContainerDied","Data":"928eb8fd4a65d2fa4632e6fb0cc82b47b5b7766864fcb7363c5f216f3854ab6c"}
Nov 22 07:26:12 crc kubenswrapper[4929]: I1122 07:26:12.278583 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ntsbw" event={"ID":"c8419d21-4df2-48cf-8295-d579e1131f64","Type":"ContainerStarted","Data":"10151ffe14eee3c6a629efaa32a73478ec0f5ae9629e98a847faca8098f57301"}
Nov 22 07:26:12 crc kubenswrapper[4929]: I1122 07:26:12.280852 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-t5xzk" event={"ID":"1e90cb83-8f01-48a0-9d5e-9dbdacb9c9b8","Type":"ContainerStarted","Data":"1ee62f0272f02c652c1655304b7f5afd62b863e4cdc15e6a64f09c6ac3f51e0d"}
Nov 22 07:26:12 crc kubenswrapper[4929]: I1122 07:26:12.281366 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-t5xzk"
Nov 22 07:26:12 crc kubenswrapper[4929]: I1122 07:26:12.285227 4929 generic.go:334] "Generic (PLEG): container finished" podID="f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388" containerID="4723873587d6bc2b8e559295b76967389a6c9d9f9888118c9ec3b58dabd333eb" exitCode=0
Nov 22 07:26:12 crc kubenswrapper[4929]: I1122 07:26:12.285393 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-28wht" event={"ID":"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388","Type":"ContainerDied","Data":"4723873587d6bc2b8e559295b76967389a6c9d9f9888118c9ec3b58dabd333eb"}
Nov 22 07:26:12 crc kubenswrapper[4929]: I1122 07:26:12.285473 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-28wht" event={"ID":"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388","Type":"ContainerStarted","Data":"9da55c26bcba23d43d165841262deb68fb425140d82ff97acb5c5a633fc1fe83"}
Nov 22 07:26:12 crc kubenswrapper[4929]: I1122 07:26:12.315923 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-h45rh" podStartSLOduration=2.343223443 podStartE2EDuration="20.315903396s" podCreationTimestamp="2025-11-22 07:25:52 +0000 UTC" firstStartedPulling="2025-11-22 07:25:53.095483629 +0000 UTC m=+890.204937642" lastFinishedPulling="2025-11-22 07:26:11.068163572 +0000 UTC m=+908.177617595" observedRunningTime="2025-11-22 07:26:12.312672504 +0000 UTC m=+909.422126537" watchObservedRunningTime="2025-11-22 07:26:12.315903396 +0000 UTC m=+909.425357409"
Nov 22 07:26:12 crc kubenswrapper[4929]: I1122 07:26:12.354791 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-t5xzk" podStartSLOduration=2.609386765 podStartE2EDuration="20.354770782s" podCreationTimestamp="2025-11-22 07:25:52 +0000 UTC" firstStartedPulling="2025-11-22 07:25:53.321934814 +0000 UTC m=+890.431388827" lastFinishedPulling="2025-11-22 07:26:11.067318831 +0000 UTC m=+908.176772844" observedRunningTime="2025-11-22 07:26:12.349423366 +0000 UTC m=+909.458877399" watchObservedRunningTime="2025-11-22 07:26:12.354770782 +0000 UTC m=+909.464224805"
Nov 22 07:26:12 crc kubenswrapper[4929]: I1122 07:26:12.482750 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-88c4k" podStartSLOduration=2.415703622 podStartE2EDuration="20.482730138s" podCreationTimestamp="2025-11-22 07:25:52 +0000 UTC" firstStartedPulling="2025-11-22 07:25:53.024338705 +0000 UTC m=+890.133792718" lastFinishedPulling="2025-11-22 07:26:11.091365221 +0000 UTC m=+908.200819234" observedRunningTime="2025-11-22 07:26:12.454545133 +0000 UTC m=+909.563999146" watchObservedRunningTime="2025-11-22 07:26:12.482730138 +0000 UTC m=+909.592184151"
Nov 22 07:26:13 crc kubenswrapper[4929]: I1122 07:26:13.292828 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-28wht" event={"ID":"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388","Type":"ContainerStarted","Data":"e0324ed25253f57c50dc642d104f44c354061c417d604ce89200247bc596cc0c"}
Nov 22 07:26:13 crc kubenswrapper[4929]: I1122 07:26:13.294555 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xld4b" event={"ID":"be1b287a-52ec-4cc1-8803-2f017a9518a4","Type":"ContainerStarted","Data":"7c99977b73c289ae883e4d779a3521262985979aa2e0c4540fc94bc9aebf7383"}
Nov 22 07:26:13 crc kubenswrapper[4929]: I1122 07:26:13.296975 4929 generic.go:334] "Generic (PLEG): container finished" podID="c8419d21-4df2-48cf-8295-d579e1131f64" containerID="76a5225f244bda794092148e90182b1bd6a112fd1459dedf2f3854f954816399" exitCode=0
Nov 22 07:26:13 crc kubenswrapper[4929]: I1122 07:26:13.298027 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ntsbw" event={"ID":"c8419d21-4df2-48cf-8295-d579e1131f64","Type":"ContainerDied","Data":"76a5225f244bda794092148e90182b1bd6a112fd1459dedf2f3854f954816399"}
Nov 22 07:26:14 crc kubenswrapper[4929]: I1122 07:26:14.304836 4929 generic.go:334] "Generic (PLEG): container finished" podID="be1b287a-52ec-4cc1-8803-2f017a9518a4" containerID="7c99977b73c289ae883e4d779a3521262985979aa2e0c4540fc94bc9aebf7383" exitCode=0
Nov 22 07:26:14 crc kubenswrapper[4929]: I1122 07:26:14.304893 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xld4b" event={"ID":"be1b287a-52ec-4cc1-8803-2f017a9518a4","Type":"ContainerDied","Data":"7c99977b73c289ae883e4d779a3521262985979aa2e0c4540fc94bc9aebf7383"}
Nov 22 07:26:14 crc kubenswrapper[4929]: I1122 07:26:14.308265 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ntsbw" event={"ID":"c8419d21-4df2-48cf-8295-d579e1131f64","Type":"ContainerStarted","Data":"c58743e27b3f650bd89f6c1ca7a66f9f12a244bb5b848c5cfd1110d137d6fc8f"}
Nov 22 07:26:14 crc kubenswrapper[4929]: I1122 07:26:14.312107 4929 generic.go:334] "Generic (PLEG): container finished" podID="f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388" containerID="e0324ed25253f57c50dc642d104f44c354061c417d604ce89200247bc596cc0c" exitCode=0
Nov 22 07:26:14 crc kubenswrapper[4929]: I1122 07:26:14.312352 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-28wht" event={"ID":"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388","Type":"ContainerDied","Data":"e0324ed25253f57c50dc642d104f44c354061c417d604ce89200247bc596cc0c"}
Nov 22 07:26:15 crc kubenswrapper[4929]: I1122 07:26:15.320527 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xld4b" event={"ID":"be1b287a-52ec-4cc1-8803-2f017a9518a4","Type":"ContainerStarted","Data":"5f639273a6d0e13455889f5d97c4f92b8832d0e348be756cce31fe9044efeed9"}
Nov 22 07:26:15 crc kubenswrapper[4929]: I1122 07:26:15.323272 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-28wht" event={"ID":"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388","Type":"ContainerStarted","Data":"320a9ca2ee1a57d404d34af027201192b118c1fe807031d3c735839fff16f134"}
Nov 22 07:26:15 crc kubenswrapper[4929]: I1122 07:26:15.347744 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ntsbw" podStartSLOduration=12.617127206 podStartE2EDuration="14.347725627s" podCreationTimestamp="2025-11-22 07:26:01 +0000 UTC" firstStartedPulling="2025-11-22 07:26:12.279530363 +0000 UTC m=+909.388984376" lastFinishedPulling="2025-11-22 07:26:14.010128784 +0000 UTC m=+911.119582797" observedRunningTime="2025-11-22 07:26:14.383222489 +0000 UTC m=+911.492676532" watchObservedRunningTime="2025-11-22 07:26:15.347725627 +0000 UTC m=+912.457179640"
Nov 22 07:26:15 crc kubenswrapper[4929]: I1122 07:26:15.348568 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xld4b" podStartSLOduration=13.867448717 podStartE2EDuration="16.348561538s" podCreationTimestamp="2025-11-22 07:25:59 +0000 UTC" firstStartedPulling="2025-11-22 07:26:12.274872075 +0000 UTC m=+909.384326088" lastFinishedPulling="2025-11-22 07:26:14.755984896 +0000 UTC m=+911.865438909" observedRunningTime="2025-11-22 07:26:15.345445709 +0000 UTC m=+912.454899732" watchObservedRunningTime="2025-11-22 07:26:15.348561538 +0000 UTC m=+912.458015551"
Nov 22 07:26:15 crc kubenswrapper[4929]: I1122 07:26:15.366859 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-28wht" podStartSLOduration=13.753580419 podStartE2EDuration="16.366838802s" podCreationTimestamp="2025-11-22 07:25:59 +0000 UTC" firstStartedPulling="2025-11-22 07:26:12.286742756 +0000 UTC m=+909.396196759" lastFinishedPulling="2025-11-22 07:26:14.900001129 +0000 UTC m=+912.009455142" observedRunningTime="2025-11-22 07:26:15.364293697 +0000 UTC m=+912.473747730" watchObservedRunningTime="2025-11-22 07:26:15.366838802 +0000 UTC m=+912.476292815"
Nov 22 07:26:19 crc kubenswrapper[4929]: I1122 07:26:19.600425 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-28wht"
Nov 22 07:26:19 crc kubenswrapper[4929]: I1122 07:26:19.600795 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-28wht"
Nov 22 07:26:19 crc kubenswrapper[4929]: I1122 07:26:19.646670 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-28wht"
Nov 22 07:26:19 crc kubenswrapper[4929]: I1122 07:26:19.834093 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xld4b"
Nov 22 07:26:19 crc kubenswrapper[4929]: I1122 07:26:19.834168 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xld4b"
Nov 22 07:26:19 crc kubenswrapper[4929]: I1122 07:26:19.868837 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xld4b"
Nov 22 07:26:20 crc kubenswrapper[4929]: I1122 07:26:20.385164 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-28wht"
Nov 22 07:26:20 crc kubenswrapper[4929]: I1122 07:26:20.390574 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xld4b"
Nov 22 07:26:21 crc kubenswrapper[4929]: I1122 07:26:21.394441 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ntsbw"
Nov 22 07:26:21 crc kubenswrapper[4929]: I1122 07:26:21.394808 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ntsbw"
Nov 22 07:26:21 crc kubenswrapper[4929]: I1122 07:26:21.433884 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ntsbw"
Nov 22 07:26:22 crc kubenswrapper[4929]: I1122 07:26:22.364468 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p" event={"ID":"ddfa0602-ba6c-4d22-9842-a824a8b4a5b4","Type":"ContainerStarted","Data":"fbc4e18b418204b4116dfd928edc9ee855f50c4a35ff807438009c5fbe73a771"}
Nov 22 07:26:22 crc kubenswrapper[4929]: I1122 07:26:22.380951 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p" podStartSLOduration=-9223372006.473845 podStartE2EDuration="30.380930426s" podCreationTimestamp="2025-11-22 07:25:52 +0000 UTC" firstStartedPulling="2025-11-22 07:25:52.947024973 +0000 UTC m=+890.056478986" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:26:22.379133981 +0000 UTC m=+919.488588004" watchObservedRunningTime="2025-11-22 07:26:22.380930426 +0000 UTC m=+919.490384439"
Nov 22 07:26:22 crc kubenswrapper[4929]: I1122 07:26:22.409498 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ntsbw"
Nov 22 07:26:23 crc kubenswrapper[4929]: I1122 07:26:23.006423 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-t5xzk"
Nov 22 07:26:23 crc kubenswrapper[4929]: I1122 07:26:23.503865 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ntsbw"]
Nov 22 07:26:23 crc kubenswrapper[4929]: I1122 07:26:23.903837 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xld4b"]
Nov 22 07:26:23 crc kubenswrapper[4929]: I1122 07:26:23.904079 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xld4b"
podUID="be1b287a-52ec-4cc1-8803-2f017a9518a4" containerName="registry-server" containerID="cri-o://5f639273a6d0e13455889f5d97c4f92b8832d0e348be756cce31fe9044efeed9" gracePeriod=2 Nov 22 07:26:25 crc kubenswrapper[4929]: I1122 07:26:25.383729 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ntsbw" podUID="c8419d21-4df2-48cf-8295-d579e1131f64" containerName="registry-server" containerID="cri-o://c58743e27b3f650bd89f6c1ca7a66f9f12a244bb5b848c5cfd1110d137d6fc8f" gracePeriod=2 Nov 22 07:26:26 crc kubenswrapper[4929]: I1122 07:26:26.391266 4929 generic.go:334] "Generic (PLEG): container finished" podID="be1b287a-52ec-4cc1-8803-2f017a9518a4" containerID="5f639273a6d0e13455889f5d97c4f92b8832d0e348be756cce31fe9044efeed9" exitCode=0 Nov 22 07:26:26 crc kubenswrapper[4929]: I1122 07:26:26.391313 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xld4b" event={"ID":"be1b287a-52ec-4cc1-8803-2f017a9518a4","Type":"ContainerDied","Data":"5f639273a6d0e13455889f5d97c4f92b8832d0e348be756cce31fe9044efeed9"} Nov 22 07:26:27 crc kubenswrapper[4929]: I1122 07:26:27.104674 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-28wht"] Nov 22 07:26:27 crc kubenswrapper[4929]: I1122 07:26:27.105005 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-28wht" podUID="f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388" containerName="registry-server" containerID="cri-o://320a9ca2ee1a57d404d34af027201192b118c1fe807031d3c735839fff16f134" gracePeriod=2 Nov 22 07:26:28 crc kubenswrapper[4929]: I1122 07:26:28.353466 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xld4b" Nov 22 07:26:28 crc kubenswrapper[4929]: I1122 07:26:28.419386 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xld4b" event={"ID":"be1b287a-52ec-4cc1-8803-2f017a9518a4","Type":"ContainerDied","Data":"49df9550757fdb9b951d56e150c97342dda001b2d1345aa90d8055b34d94a22d"} Nov 22 07:26:28 crc kubenswrapper[4929]: I1122 07:26:28.419440 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xld4b" Nov 22 07:26:28 crc kubenswrapper[4929]: I1122 07:26:28.419444 4929 scope.go:117] "RemoveContainer" containerID="5f639273a6d0e13455889f5d97c4f92b8832d0e348be756cce31fe9044efeed9" Nov 22 07:26:29 crc kubenswrapper[4929]: I1122 07:26:28.455886 4929 scope.go:117] "RemoveContainer" containerID="7c99977b73c289ae883e4d779a3521262985979aa2e0c4540fc94bc9aebf7383" Nov 22 07:26:29 crc kubenswrapper[4929]: I1122 07:26:28.480814 4929 scope.go:117] "RemoveContainer" containerID="531a54c832c7031f8ab7b824eff94bc54d7a20f755001918e239cf4e81d451e7" Nov 22 07:26:29 crc kubenswrapper[4929]: I1122 07:26:28.487026 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be1b287a-52ec-4cc1-8803-2f017a9518a4-utilities\") pod \"be1b287a-52ec-4cc1-8803-2f017a9518a4\" (UID: \"be1b287a-52ec-4cc1-8803-2f017a9518a4\") " Nov 22 07:26:29 crc kubenswrapper[4929]: I1122 07:26:28.487133 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zcvdh\" (UniqueName: \"kubernetes.io/projected/be1b287a-52ec-4cc1-8803-2f017a9518a4-kube-api-access-zcvdh\") pod \"be1b287a-52ec-4cc1-8803-2f017a9518a4\" (UID: \"be1b287a-52ec-4cc1-8803-2f017a9518a4\") " Nov 22 07:26:29 crc kubenswrapper[4929]: I1122 07:26:28.487196 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be1b287a-52ec-4cc1-8803-2f017a9518a4-catalog-content\") pod \"be1b287a-52ec-4cc1-8803-2f017a9518a4\" (UID: \"be1b287a-52ec-4cc1-8803-2f017a9518a4\") " Nov 22 07:26:29 crc kubenswrapper[4929]: I1122 07:26:28.488124 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be1b287a-52ec-4cc1-8803-2f017a9518a4-utilities" (OuterVolumeSpecName: "utilities") pod "be1b287a-52ec-4cc1-8803-2f017a9518a4" (UID: "be1b287a-52ec-4cc1-8803-2f017a9518a4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:26:29 crc kubenswrapper[4929]: I1122 07:26:28.492464 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be1b287a-52ec-4cc1-8803-2f017a9518a4-kube-api-access-zcvdh" (OuterVolumeSpecName: "kube-api-access-zcvdh") pod "be1b287a-52ec-4cc1-8803-2f017a9518a4" (UID: "be1b287a-52ec-4cc1-8803-2f017a9518a4"). InnerVolumeSpecName "kube-api-access-zcvdh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:26:29 crc kubenswrapper[4929]: I1122 07:26:28.588693 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be1b287a-52ec-4cc1-8803-2f017a9518a4-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 07:26:29 crc kubenswrapper[4929]: I1122 07:26:28.589032 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zcvdh\" (UniqueName: \"kubernetes.io/projected/be1b287a-52ec-4cc1-8803-2f017a9518a4-kube-api-access-zcvdh\") on node \"crc\" DevicePath \"\"" Nov 22 07:26:29 crc kubenswrapper[4929]: I1122 07:26:29.430755 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ntsbw_c8419d21-4df2-48cf-8295-d579e1131f64/registry-server/0.log" Nov 22 07:26:29 crc kubenswrapper[4929]: I1122 07:26:29.431756 4929 generic.go:334] "Generic (PLEG): container finished" podID="c8419d21-4df2-48cf-8295-d579e1131f64" containerID="c58743e27b3f650bd89f6c1ca7a66f9f12a244bb5b848c5cfd1110d137d6fc8f" exitCode=137 Nov 22 07:26:29 crc kubenswrapper[4929]: I1122 07:26:29.431808 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ntsbw" event={"ID":"c8419d21-4df2-48cf-8295-d579e1131f64","Type":"ContainerDied","Data":"c58743e27b3f650bd89f6c1ca7a66f9f12a244bb5b848c5cfd1110d137d6fc8f"} Nov 22 07:26:29 crc kubenswrapper[4929]: E1122 07:26:29.600926 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 320a9ca2ee1a57d404d34af027201192b118c1fe807031d3c735839fff16f134 is running failed: container process not found" containerID="320a9ca2ee1a57d404d34af027201192b118c1fe807031d3c735839fff16f134" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 07:26:29 crc kubenswrapper[4929]: E1122 07:26:29.601609 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 320a9ca2ee1a57d404d34af027201192b118c1fe807031d3c735839fff16f134 is running failed: container process not found" containerID="320a9ca2ee1a57d404d34af027201192b118c1fe807031d3c735839fff16f134" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 07:26:29 crc kubenswrapper[4929]: E1122 07:26:29.601890 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 320a9ca2ee1a57d404d34af027201192b118c1fe807031d3c735839fff16f134 is running failed: container process not found" containerID="320a9ca2ee1a57d404d34af027201192b118c1fe807031d3c735839fff16f134" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 07:26:29 crc kubenswrapper[4929]: E1122 07:26:29.601947 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 320a9ca2ee1a57d404d34af027201192b118c1fe807031d3c735839fff16f134 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-28wht" podUID="f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388" containerName="registry-server" Nov 22 07:26:31 crc kubenswrapper[4929]: E1122 07:26:31.394200 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c58743e27b3f650bd89f6c1ca7a66f9f12a244bb5b848c5cfd1110d137d6fc8f is running failed: container process not found" 
containerID="c58743e27b3f650bd89f6c1ca7a66f9f12a244bb5b848c5cfd1110d137d6fc8f" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 07:26:31 crc kubenswrapper[4929]: E1122 07:26:31.395094 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c58743e27b3f650bd89f6c1ca7a66f9f12a244bb5b848c5cfd1110d137d6fc8f is running failed: container process not found" containerID="c58743e27b3f650bd89f6c1ca7a66f9f12a244bb5b848c5cfd1110d137d6fc8f" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 07:26:31 crc kubenswrapper[4929]: E1122 07:26:31.395537 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c58743e27b3f650bd89f6c1ca7a66f9f12a244bb5b848c5cfd1110d137d6fc8f is running failed: container process not found" containerID="c58743e27b3f650bd89f6c1ca7a66f9f12a244bb5b848c5cfd1110d137d6fc8f" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 07:26:31 crc kubenswrapper[4929]: E1122 07:26:31.395605 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c58743e27b3f650bd89f6c1ca7a66f9f12a244bb5b848c5cfd1110d137d6fc8f is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-ntsbw" podUID="c8419d21-4df2-48cf-8295-d579e1131f64" containerName="registry-server" Nov 22 07:26:31 crc kubenswrapper[4929]: I1122 07:26:31.439863 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ntsbw_c8419d21-4df2-48cf-8295-d579e1131f64/registry-server/0.log" Nov 22 07:26:31 crc kubenswrapper[4929]: I1122 07:26:31.440696 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ntsbw" Nov 22 07:26:31 crc kubenswrapper[4929]: I1122 07:26:31.447717 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-ntsbw_c8419d21-4df2-48cf-8295-d579e1131f64/registry-server/0.log" Nov 22 07:26:31 crc kubenswrapper[4929]: I1122 07:26:31.449675 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ntsbw" Nov 22 07:26:31 crc kubenswrapper[4929]: I1122 07:26:31.449696 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ntsbw" event={"ID":"c8419d21-4df2-48cf-8295-d579e1131f64","Type":"ContainerDied","Data":"10151ffe14eee3c6a629efaa32a73478ec0f5ae9629e98a847faca8098f57301"} Nov 22 07:26:31 crc kubenswrapper[4929]: I1122 07:26:31.449812 4929 scope.go:117] "RemoveContainer" containerID="c58743e27b3f650bd89f6c1ca7a66f9f12a244bb5b848c5cfd1110d137d6fc8f" Nov 22 07:26:31 crc kubenswrapper[4929]: I1122 07:26:31.453473 4929 generic.go:334] "Generic (PLEG): container finished" podID="f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388" containerID="320a9ca2ee1a57d404d34af027201192b118c1fe807031d3c735839fff16f134" exitCode=0 Nov 22 07:26:31 crc kubenswrapper[4929]: I1122 07:26:31.453518 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-28wht" event={"ID":"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388","Type":"ContainerDied","Data":"320a9ca2ee1a57d404d34af027201192b118c1fe807031d3c735839fff16f134"} Nov 22 07:26:31 crc kubenswrapper[4929]: I1122 07:26:31.488504 4929 scope.go:117] "RemoveContainer" containerID="76a5225f244bda794092148e90182b1bd6a112fd1459dedf2f3854f954816399" Nov 22 07:26:31 crc kubenswrapper[4929]: I1122 07:26:31.519011 4929 scope.go:117] "RemoveContainer" containerID="928eb8fd4a65d2fa4632e6fb0cc82b47b5b7766864fcb7363c5f216f3854ab6c" Nov 22 07:26:31 crc kubenswrapper[4929]: I1122 07:26:31.523962 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8419d21-4df2-48cf-8295-d579e1131f64-catalog-content\") pod \"c8419d21-4df2-48cf-8295-d579e1131f64\" (UID: \"c8419d21-4df2-48cf-8295-d579e1131f64\") " Nov 22 07:26:31 crc kubenswrapper[4929]: I1122 07:26:31.524001 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ds2vw\" (UniqueName: \"kubernetes.io/projected/c8419d21-4df2-48cf-8295-d579e1131f64-kube-api-access-ds2vw\") pod \"c8419d21-4df2-48cf-8295-d579e1131f64\" (UID: \"c8419d21-4df2-48cf-8295-d579e1131f64\") " Nov 22 07:26:31 crc kubenswrapper[4929]: I1122 07:26:31.524028 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8419d21-4df2-48cf-8295-d579e1131f64-utilities\") pod \"c8419d21-4df2-48cf-8295-d579e1131f64\" (UID: \"c8419d21-4df2-48cf-8295-d579e1131f64\") " Nov 22 07:26:31 crc kubenswrapper[4929]: I1122 07:26:31.524738 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8419d21-4df2-48cf-8295-d579e1131f64-utilities" (OuterVolumeSpecName: "utilities") pod "c8419d21-4df2-48cf-8295-d579e1131f64" (UID: "c8419d21-4df2-48cf-8295-d579e1131f64"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:26:31 crc kubenswrapper[4929]: I1122 07:26:31.531119 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8419d21-4df2-48cf-8295-d579e1131f64-kube-api-access-ds2vw" (OuterVolumeSpecName: "kube-api-access-ds2vw") pod "c8419d21-4df2-48cf-8295-d579e1131f64" (UID: "c8419d21-4df2-48cf-8295-d579e1131f64"). InnerVolumeSpecName "kube-api-access-ds2vw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:26:31 crc kubenswrapper[4929]: I1122 07:26:31.625418 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ds2vw\" (UniqueName: \"kubernetes.io/projected/c8419d21-4df2-48cf-8295-d579e1131f64-kube-api-access-ds2vw\") on node \"crc\" DevicePath \"\"" Nov 22 07:26:31 crc kubenswrapper[4929]: I1122 07:26:31.625785 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8419d21-4df2-48cf-8295-d579e1131f64-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 07:26:32 crc kubenswrapper[4929]: I1122 07:26:32.073476 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8419d21-4df2-48cf-8295-d579e1131f64-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c8419d21-4df2-48cf-8295-d579e1131f64" (UID: "c8419d21-4df2-48cf-8295-d579e1131f64"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:26:32 crc kubenswrapper[4929]: I1122 07:26:32.131136 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8419d21-4df2-48cf-8295-d579e1131f64-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 07:26:32 crc kubenswrapper[4929]: I1122 07:26:32.383234 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ntsbw"] Nov 22 07:26:32 crc kubenswrapper[4929]: I1122 07:26:32.397040 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ntsbw"] Nov 22 07:26:33 crc kubenswrapper[4929]: I1122 07:26:33.958500 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8419d21-4df2-48cf-8295-d579e1131f64" path="/var/lib/kubelet/pods/c8419d21-4df2-48cf-8295-d579e1131f64/volumes" Nov 22 07:26:34 crc kubenswrapper[4929]: I1122 07:26:34.852718 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-28wht" Nov 22 07:26:34 crc kubenswrapper[4929]: I1122 07:26:34.869959 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6pscc\" (UniqueName: \"kubernetes.io/projected/f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388-kube-api-access-6pscc\") pod \"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388\" (UID: \"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388\") " Nov 22 07:26:34 crc kubenswrapper[4929]: I1122 07:26:34.870042 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388-utilities\") pod \"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388\" (UID: \"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388\") " Nov 22 07:26:34 crc kubenswrapper[4929]: I1122 07:26:34.870081 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388-catalog-content\") pod \"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388\" (UID: \"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388\") " Nov 22 07:26:34 crc kubenswrapper[4929]: I1122 07:26:34.878578 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388-kube-api-access-6pscc" (OuterVolumeSpecName: "kube-api-access-6pscc") pod "f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388" (UID: "f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388"). 
InnerVolumeSpecName "kube-api-access-6pscc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:26:34 crc kubenswrapper[4929]: I1122 07:26:34.881334 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388-utilities" (OuterVolumeSpecName: "utilities") pod "f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388" (UID: "f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:26:34 crc kubenswrapper[4929]: I1122 07:26:34.972456 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 07:26:34 crc kubenswrapper[4929]: I1122 07:26:34.972501 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6pscc\" (UniqueName: \"kubernetes.io/projected/f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388-kube-api-access-6pscc\") on node \"crc\" DevicePath \"\"" Nov 22 07:26:35 crc kubenswrapper[4929]: I1122 07:26:35.482837 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-28wht" event={"ID":"f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388","Type":"ContainerDied","Data":"9da55c26bcba23d43d165841262deb68fb425140d82ff97acb5c5a633fc1fe83"} Nov 22 07:26:35 crc kubenswrapper[4929]: I1122 07:26:35.482888 4929 scope.go:117] "RemoveContainer" containerID="320a9ca2ee1a57d404d34af027201192b118c1fe807031d3c735839fff16f134" Nov 22 07:26:35 crc kubenswrapper[4929]: I1122 07:26:35.483001 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-28wht" Nov 22 07:26:35 crc kubenswrapper[4929]: I1122 07:26:35.507006 4929 scope.go:117] "RemoveContainer" containerID="e0324ed25253f57c50dc642d104f44c354061c417d604ce89200247bc596cc0c" Nov 22 07:26:35 crc kubenswrapper[4929]: I1122 07:26:35.522747 4929 scope.go:117] "RemoveContainer" containerID="4723873587d6bc2b8e559295b76967389a6c9d9f9888118c9ec3b58dabd333eb" Nov 22 07:26:35 crc kubenswrapper[4929]: I1122 07:26:35.917565 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388" (UID: "f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:26:35 crc kubenswrapper[4929]: I1122 07:26:35.984914 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 07:26:36 crc kubenswrapper[4929]: I1122 07:26:36.104461 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-28wht"] Nov 22 07:26:36 crc kubenswrapper[4929]: I1122 07:26:36.115120 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-28wht"] Nov 22 07:26:37 crc kubenswrapper[4929]: I1122 07:26:37.654112 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be1b287a-52ec-4cc1-8803-2f017a9518a4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "be1b287a-52ec-4cc1-8803-2f017a9518a4" (UID: "be1b287a-52ec-4cc1-8803-2f017a9518a4"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:26:37 crc kubenswrapper[4929]: I1122 07:26:37.704810 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be1b287a-52ec-4cc1-8803-2f017a9518a4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 07:26:37 crc kubenswrapper[4929]: I1122 07:26:37.744931 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xld4b"] Nov 22 07:26:37 crc kubenswrapper[4929]: I1122 07:26:37.750116 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xld4b"] Nov 22 07:26:37 crc kubenswrapper[4929]: I1122 07:26:37.961550 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be1b287a-52ec-4cc1-8803-2f017a9518a4" path="/var/lib/kubelet/pods/be1b287a-52ec-4cc1-8803-2f017a9518a4/volumes" Nov 22 07:26:37 crc kubenswrapper[4929]: I1122 07:26:37.962773 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388" path="/var/lib/kubelet/pods/f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388/volumes" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.352526 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d"] Nov 22 07:26:42 crc kubenswrapper[4929]: E1122 07:26:42.353468 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8419d21-4df2-48cf-8295-d579e1131f64" containerName="extract-utilities" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.353488 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8419d21-4df2-48cf-8295-d579e1131f64" containerName="extract-utilities" Nov 22 07:26:42 crc kubenswrapper[4929]: E1122 07:26:42.353504 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8419d21-4df2-48cf-8295-d579e1131f64" containerName="extract-content" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.353511 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8419d21-4df2-48cf-8295-d579e1131f64" containerName="extract-content" Nov 22 07:26:42 crc kubenswrapper[4929]: E1122 07:26:42.353523 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be1b287a-52ec-4cc1-8803-2f017a9518a4" containerName="extract-content" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.353532 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="be1b287a-52ec-4cc1-8803-2f017a9518a4" containerName="extract-content" Nov 22 07:26:42 crc kubenswrapper[4929]: E1122 07:26:42.353544 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8419d21-4df2-48cf-8295-d579e1131f64" containerName="registry-server" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.353551 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8419d21-4df2-48cf-8295-d579e1131f64" containerName="registry-server" Nov 22 07:26:42 crc kubenswrapper[4929]: E1122 07:26:42.353573 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388" containerName="registry-server" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.353579 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388" containerName="registry-server" Nov 22 07:26:42 crc kubenswrapper[4929]: E1122 07:26:42.353589 4929 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388" containerName="extract-content" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.353598 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388" containerName="extract-content" Nov 22 07:26:42 crc kubenswrapper[4929]: E1122 07:26:42.353609 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388" containerName="extract-utilities" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.353615 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388" containerName="extract-utilities" Nov 22 07:26:42 crc kubenswrapper[4929]: E1122 07:26:42.353626 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be1b287a-52ec-4cc1-8803-2f017a9518a4" containerName="extract-utilities" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.353633 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="be1b287a-52ec-4cc1-8803-2f017a9518a4" containerName="extract-utilities" Nov 22 07:26:42 crc kubenswrapper[4929]: E1122 07:26:42.353645 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be1b287a-52ec-4cc1-8803-2f017a9518a4" containerName="registry-server" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.353652 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="be1b287a-52ec-4cc1-8803-2f017a9518a4" containerName="registry-server" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.353756 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="be1b287a-52ec-4cc1-8803-2f017a9518a4" containerName="registry-server" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.353766 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8419d21-4df2-48cf-8295-d579e1131f64" containerName="registry-server" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.353778 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5dc8ece-0e82-4b05-8f3c-adcf7fe5c388" containerName="registry-server" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.354620 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.357824 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.370975 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d"] Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.464134 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/03b26279-625b-4afd-be4e-8bd77491463b-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d\" (UID: \"03b26279-625b-4afd-be4e-8bd77491463b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.464179 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzvbb\" (UniqueName: \"kubernetes.io/projected/03b26279-625b-4afd-be4e-8bd77491463b-kube-api-access-mzvbb\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d\" (UID: \"03b26279-625b-4afd-be4e-8bd77491463b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.464265 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/03b26279-625b-4afd-be4e-8bd77491463b-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d\" (UID: \"03b26279-625b-4afd-be4e-8bd77491463b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.565846 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/03b26279-625b-4afd-be4e-8bd77491463b-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d\" (UID: \"03b26279-625b-4afd-be4e-8bd77491463b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.565912 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/03b26279-625b-4afd-be4e-8bd77491463b-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d\" (UID: \"03b26279-625b-4afd-be4e-8bd77491463b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.565932 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzvbb\" (UniqueName: \"kubernetes.io/projected/03b26279-625b-4afd-be4e-8bd77491463b-kube-api-access-mzvbb\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d\" (UID: \"03b26279-625b-4afd-be4e-8bd77491463b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.589762 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/03b26279-625b-4afd-be4e-8bd77491463b-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d\" (UID: \"03b26279-625b-4afd-be4e-8bd77491463b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.596057 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/03b26279-625b-4afd-be4e-8bd77491463b-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d\" (UID: \"03b26279-625b-4afd-be4e-8bd77491463b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.799640 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzvbb\" (UniqueName: \"kubernetes.io/projected/03b26279-625b-4afd-be4e-8bd77491463b-kube-api-access-mzvbb\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d\" (UID: \"03b26279-625b-4afd-be4e-8bd77491463b\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d" Nov 22 07:26:42 crc kubenswrapper[4929]: I1122 07:26:42.827880 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d" Nov 22 07:26:43 crc kubenswrapper[4929]: I1122 07:26:43.074049 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d"] Nov 22 07:26:43 crc kubenswrapper[4929]: I1122 07:26:43.530314 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d" event={"ID":"03b26279-625b-4afd-be4e-8bd77491463b","Type":"ContainerStarted","Data":"2672439c6d6f0ac147d92341e58aadaed8796dbc3fd94b176ce1123805b26512"} Nov 22 07:26:44 crc kubenswrapper[4929]: I1122 07:26:44.537085 4929 generic.go:334] "Generic (PLEG): container finished" podID="03b26279-625b-4afd-be4e-8bd77491463b" containerID="2d445250c746a9b2579cde068d7817e29872bab6a99445bc4403f9c3d347f94c" exitCode=0 Nov 22 07:26:44 crc kubenswrapper[4929]: I1122 07:26:44.537144 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d" event={"ID":"03b26279-625b-4afd-be4e-8bd77491463b","Type":"ContainerDied","Data":"2d445250c746a9b2579cde068d7817e29872bab6a99445bc4403f9c3d347f94c"} Nov 22 07:26:49 crc kubenswrapper[4929]: I1122 07:26:49.567051 4929 generic.go:334] "Generic (PLEG): container finished" podID="03b26279-625b-4afd-be4e-8bd77491463b" containerID="7fae06ee46d6d828122a6156c5ba01da74cf175f2f2c05d0bbe8d06cbb2e36de" exitCode=0 Nov 22 07:26:49 crc kubenswrapper[4929]: I1122 07:26:49.567133 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d" event={"ID":"03b26279-625b-4afd-be4e-8bd77491463b","Type":"ContainerDied","Data":"7fae06ee46d6d828122a6156c5ba01da74cf175f2f2c05d0bbe8d06cbb2e36de"} Nov 22 07:26:50 crc kubenswrapper[4929]: I1122 07:26:50.577122 4929 generic.go:334] "Generic (PLEG): container finished" podID="03b26279-625b-4afd-be4e-8bd77491463b" containerID="cdfdefb15f6c44c4f986df8f81a259666b41319132e65c5c5f647c87fa46ca34" exitCode=0 Nov 22 07:26:50 crc kubenswrapper[4929]: I1122 
07:26:50.577169 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d" event={"ID":"03b26279-625b-4afd-be4e-8bd77491463b","Type":"ContainerDied","Data":"cdfdefb15f6c44c4f986df8f81a259666b41319132e65c5c5f647c87fa46ca34"} Nov 22 07:26:51 crc kubenswrapper[4929]: I1122 07:26:51.826694 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d" Nov 22 07:26:51 crc kubenswrapper[4929]: I1122 07:26:51.917434 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mzvbb\" (UniqueName: \"kubernetes.io/projected/03b26279-625b-4afd-be4e-8bd77491463b-kube-api-access-mzvbb\") pod \"03b26279-625b-4afd-be4e-8bd77491463b\" (UID: \"03b26279-625b-4afd-be4e-8bd77491463b\") " Nov 22 07:26:51 crc kubenswrapper[4929]: I1122 07:26:51.917499 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/03b26279-625b-4afd-be4e-8bd77491463b-bundle\") pod \"03b26279-625b-4afd-be4e-8bd77491463b\" (UID: \"03b26279-625b-4afd-be4e-8bd77491463b\") " Nov 22 07:26:51 crc kubenswrapper[4929]: I1122 07:26:51.917544 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/03b26279-625b-4afd-be4e-8bd77491463b-util\") pod \"03b26279-625b-4afd-be4e-8bd77491463b\" (UID: \"03b26279-625b-4afd-be4e-8bd77491463b\") " Nov 22 07:26:51 crc kubenswrapper[4929]: I1122 07:26:51.918675 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03b26279-625b-4afd-be4e-8bd77491463b-bundle" (OuterVolumeSpecName: "bundle") pod "03b26279-625b-4afd-be4e-8bd77491463b" (UID: "03b26279-625b-4afd-be4e-8bd77491463b"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:26:51 crc kubenswrapper[4929]: I1122 07:26:51.923423 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03b26279-625b-4afd-be4e-8bd77491463b-kube-api-access-mzvbb" (OuterVolumeSpecName: "kube-api-access-mzvbb") pod "03b26279-625b-4afd-be4e-8bd77491463b" (UID: "03b26279-625b-4afd-be4e-8bd77491463b"). InnerVolumeSpecName "kube-api-access-mzvbb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:26:51 crc kubenswrapper[4929]: I1122 07:26:51.930273 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03b26279-625b-4afd-be4e-8bd77491463b-util" (OuterVolumeSpecName: "util") pod "03b26279-625b-4afd-be4e-8bd77491463b" (UID: "03b26279-625b-4afd-be4e-8bd77491463b"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:26:52 crc kubenswrapper[4929]: I1122 07:26:52.018992 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mzvbb\" (UniqueName: \"kubernetes.io/projected/03b26279-625b-4afd-be4e-8bd77491463b-kube-api-access-mzvbb\") on node \"crc\" DevicePath \"\"" Nov 22 07:26:52 crc kubenswrapper[4929]: I1122 07:26:52.019036 4929 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/03b26279-625b-4afd-be4e-8bd77491463b-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:26:52 crc kubenswrapper[4929]: I1122 07:26:52.019046 4929 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/03b26279-625b-4afd-be4e-8bd77491463b-util\") on node \"crc\" DevicePath \"\"" Nov 22 07:26:52 crc kubenswrapper[4929]: I1122 07:26:52.590185 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d" event={"ID":"03b26279-625b-4afd-be4e-8bd77491463b","Type":"ContainerDied","Data":"2672439c6d6f0ac147d92341e58aadaed8796dbc3fd94b176ce1123805b26512"} Nov 22 07:26:52 crc kubenswrapper[4929]: I1122 07:26:52.590543 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2672439c6d6f0ac147d92341e58aadaed8796dbc3fd94b176ce1123805b26512" Nov 22 07:26:52 crc kubenswrapper[4929]: I1122 07:26:52.590254 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d" Nov 22 07:26:55 crc kubenswrapper[4929]: I1122 07:26:55.275925 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-pl877"] Nov 22 07:26:55 crc kubenswrapper[4929]: E1122 07:26:55.276254 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03b26279-625b-4afd-be4e-8bd77491463b" containerName="pull" Nov 22 07:26:55 crc kubenswrapper[4929]: I1122 07:26:55.276272 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="03b26279-625b-4afd-be4e-8bd77491463b" containerName="pull" Nov 22 07:26:55 crc kubenswrapper[4929]: E1122 07:26:55.276296 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03b26279-625b-4afd-be4e-8bd77491463b" containerName="extract" Nov 22 07:26:55 crc kubenswrapper[4929]: I1122 07:26:55.276304 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="03b26279-625b-4afd-be4e-8bd77491463b" containerName="extract" Nov 22 07:26:55 crc kubenswrapper[4929]: E1122 07:26:55.276313 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03b26279-625b-4afd-be4e-8bd77491463b" containerName="util" Nov 22 07:26:55 crc kubenswrapper[4929]: I1122 07:26:55.276320 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="03b26279-625b-4afd-be4e-8bd77491463b" containerName="util" Nov 22 07:26:55 crc kubenswrapper[4929]: I1122 07:26:55.276459 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="03b26279-625b-4afd-be4e-8bd77491463b" containerName="extract" Nov 22 07:26:55 crc kubenswrapper[4929]: I1122 07:26:55.276955 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-pl877" Nov 22 07:26:55 crc kubenswrapper[4929]: I1122 07:26:55.279067 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-gxrch" Nov 22 07:26:55 crc kubenswrapper[4929]: I1122 07:26:55.279214 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 22 07:26:55 crc kubenswrapper[4929]: I1122 07:26:55.282406 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 22 07:26:55 crc kubenswrapper[4929]: I1122 07:26:55.298666 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-pl877"] Nov 22 07:26:55 crc kubenswrapper[4929]: I1122 07:26:55.461144 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6qxr\" (UniqueName: \"kubernetes.io/projected/8235a65f-ef76-4fee-9bcf-3d0fc36d330a-kube-api-access-x6qxr\") pod \"nmstate-operator-557fdffb88-pl877\" (UID: \"8235a65f-ef76-4fee-9bcf-3d0fc36d330a\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-pl877" Nov 22 07:26:55 crc kubenswrapper[4929]: I1122 07:26:55.563194 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6qxr\" (UniqueName: \"kubernetes.io/projected/8235a65f-ef76-4fee-9bcf-3d0fc36d330a-kube-api-access-x6qxr\") pod \"nmstate-operator-557fdffb88-pl877\" (UID: \"8235a65f-ef76-4fee-9bcf-3d0fc36d330a\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-pl877" Nov 22 07:26:55 crc kubenswrapper[4929]: I1122 07:26:55.582016 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6qxr\" (UniqueName: \"kubernetes.io/projected/8235a65f-ef76-4fee-9bcf-3d0fc36d330a-kube-api-access-x6qxr\") pod \"nmstate-operator-557fdffb88-pl877\" (UID: \"8235a65f-ef76-4fee-9bcf-3d0fc36d330a\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-pl877" Nov 22 07:26:55 crc kubenswrapper[4929]: I1122 07:26:55.593767 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-pl877" Nov 22 07:26:55 crc kubenswrapper[4929]: I1122 07:26:55.857344 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-pl877"] Nov 22 07:26:55 crc kubenswrapper[4929]: W1122 07:26:55.860730 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8235a65f_ef76_4fee_9bcf_3d0fc36d330a.slice/crio-d412ae6f0bb0c40abe6ba4677ebc15d0ccca17f5a8c6e08b0b113b363923e5b7 WatchSource:0}: Error finding container d412ae6f0bb0c40abe6ba4677ebc15d0ccca17f5a8c6e08b0b113b363923e5b7: Status 404 returned error can't find the container with id d412ae6f0bb0c40abe6ba4677ebc15d0ccca17f5a8c6e08b0b113b363923e5b7 Nov 22 07:26:56 crc kubenswrapper[4929]: I1122 07:26:56.618685 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-pl877" event={"ID":"8235a65f-ef76-4fee-9bcf-3d0fc36d330a","Type":"ContainerStarted","Data":"d412ae6f0bb0c40abe6ba4677ebc15d0ccca17f5a8c6e08b0b113b363923e5b7"} Nov 22 07:27:00 crc kubenswrapper[4929]: I1122 07:27:00.645345 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-pl877" event={"ID":"8235a65f-ef76-4fee-9bcf-3d0fc36d330a","Type":"ContainerStarted","Data":"2fd6d0be9513c6ea0233226a9f2ff43e03a7ac72961ca2f6ccbe98036bf0bcc4"} Nov 22 07:27:00 crc kubenswrapper[4929]: I1122 07:27:00.666093 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-pl877" podStartSLOduration=1.649879883 podStartE2EDuration="5.666066336s" podCreationTimestamp="2025-11-22 07:26:55 +0000 UTC" firstStartedPulling="2025-11-22 07:26:55.86334264 +0000 UTC m=+952.972796663" lastFinishedPulling="2025-11-22 07:26:59.879529093 +0000 UTC m=+956.988983116" observedRunningTime="2025-11-22 07:27:00.661190863 +0000 UTC m=+957.770644886" watchObservedRunningTime="2025-11-22 07:27:00.666066336 +0000 UTC m=+957.775520369" Nov 22 07:27:02 crc kubenswrapper[4929]: I1122 07:27:02.845101 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-mrx5d"] Nov 22 07:27:02 crc kubenswrapper[4929]: I1122 07:27:02.846269 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-mrx5d" Nov 22 07:27:02 crc kubenswrapper[4929]: I1122 07:27:02.848001 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-4vpvj" Nov 22 07:27:02 crc kubenswrapper[4929]: I1122 07:27:02.852523 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-g7tnr"] Nov 22 07:27:02 crc kubenswrapper[4929]: I1122 07:27:02.853157 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-g7tnr" Nov 22 07:27:02 crc kubenswrapper[4929]: I1122 07:27:02.856552 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 22 07:27:02 crc kubenswrapper[4929]: I1122 07:27:02.859059 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-mrx5d"] Nov 22 07:27:02 crc kubenswrapper[4929]: I1122 07:27:02.868419 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-g7tnr"] Nov 22 07:27:02 crc kubenswrapper[4929]: I1122 07:27:02.903956 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-hbj5r"] Nov 22 07:27:02 crc kubenswrapper[4929]: I1122 07:27:02.904640 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-hbj5r" Nov 22 07:27:02 crc kubenswrapper[4929]: I1122 07:27:02.994984 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/d03ab878-ec20-4ce1-be9d-bad1a6f35b99-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-g7tnr\" (UID: \"d03ab878-ec20-4ce1-be9d-bad1a6f35b99\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-g7tnr" Nov 22 07:27:02 crc kubenswrapper[4929]: I1122 07:27:02.995342 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gskxx\" (UniqueName: \"kubernetes.io/projected/8dfa9ce6-f8c2-4b30-81e9-0bd57c98096b-kube-api-access-gskxx\") pod \"nmstate-metrics-5dcf9c57c5-mrx5d\" (UID: \"8dfa9ce6-f8c2-4b30-81e9-0bd57c98096b\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-mrx5d" Nov 22 07:27:02 crc kubenswrapper[4929]: I1122 07:27:02.995390 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkpw7\" (UniqueName: \"kubernetes.io/projected/d03ab878-ec20-4ce1-be9d-bad1a6f35b99-kube-api-access-gkpw7\") pod \"nmstate-webhook-6b89b748d8-g7tnr\" (UID: \"d03ab878-ec20-4ce1-be9d-bad1a6f35b99\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-g7tnr" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.004492 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-fmlm4"] Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.005149 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-fmlm4" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.007145 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.007228 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-ltsxx" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.007749 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.024066 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-fmlm4"] Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.096109 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkpw7\" (UniqueName: \"kubernetes.io/projected/d03ab878-ec20-4ce1-be9d-bad1a6f35b99-kube-api-access-gkpw7\") pod \"nmstate-webhook-6b89b748d8-g7tnr\" (UID: \"d03ab878-ec20-4ce1-be9d-bad1a6f35b99\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-g7tnr" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.096169 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/fe99af20-a569-4615-9155-4656a022f118-ovs-socket\") pod \"nmstate-handler-hbj5r\" (UID: \"fe99af20-a569-4615-9155-4656a022f118\") " pod="openshift-nmstate/nmstate-handler-hbj5r" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.096494 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/d03ab878-ec20-4ce1-be9d-bad1a6f35b99-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-g7tnr\" (UID: \"d03ab878-ec20-4ce1-be9d-bad1a6f35b99\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-g7tnr" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.096552 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqrzc\" (UniqueName: \"kubernetes.io/projected/fe99af20-a569-4615-9155-4656a022f118-kube-api-access-hqrzc\") pod \"nmstate-handler-hbj5r\" (UID: \"fe99af20-a569-4615-9155-4656a022f118\") " pod="openshift-nmstate/nmstate-handler-hbj5r" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.096656 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/fe99af20-a569-4615-9155-4656a022f118-dbus-socket\") pod \"nmstate-handler-hbj5r\" (UID: \"fe99af20-a569-4615-9155-4656a022f118\") " pod="openshift-nmstate/nmstate-handler-hbj5r" Nov 22 07:27:03 crc kubenswrapper[4929]: E1122 07:27:03.096738 4929 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.096764 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gskxx\" (UniqueName: \"kubernetes.io/projected/8dfa9ce6-f8c2-4b30-81e9-0bd57c98096b-kube-api-access-gskxx\") pod \"nmstate-metrics-5dcf9c57c5-mrx5d\" (UID: \"8dfa9ce6-f8c2-4b30-81e9-0bd57c98096b\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-mrx5d" Nov 22 07:27:03 crc kubenswrapper[4929]: E1122 07:27:03.096842 4929 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/d03ab878-ec20-4ce1-be9d-bad1a6f35b99-tls-key-pair podName:d03ab878-ec20-4ce1-be9d-bad1a6f35b99 nodeName:}" failed. No retries permitted until 2025-11-22 07:27:03.59681568 +0000 UTC m=+960.706269693 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/d03ab878-ec20-4ce1-be9d-bad1a6f35b99-tls-key-pair") pod "nmstate-webhook-6b89b748d8-g7tnr" (UID: "d03ab878-ec20-4ce1-be9d-bad1a6f35b99") : secret "openshift-nmstate-webhook" not found Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.096888 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/fe99af20-a569-4615-9155-4656a022f118-nmstate-lock\") pod \"nmstate-handler-hbj5r\" (UID: \"fe99af20-a569-4615-9155-4656a022f118\") " pod="openshift-nmstate/nmstate-handler-hbj5r" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.117370 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkpw7\" (UniqueName: \"kubernetes.io/projected/d03ab878-ec20-4ce1-be9d-bad1a6f35b99-kube-api-access-gkpw7\") pod \"nmstate-webhook-6b89b748d8-g7tnr\" (UID: \"d03ab878-ec20-4ce1-be9d-bad1a6f35b99\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-g7tnr" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.117957 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gskxx\" (UniqueName: \"kubernetes.io/projected/8dfa9ce6-f8c2-4b30-81e9-0bd57c98096b-kube-api-access-gskxx\") pod \"nmstate-metrics-5dcf9c57c5-mrx5d\" (UID: \"8dfa9ce6-f8c2-4b30-81e9-0bd57c98096b\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-mrx5d" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.176460 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-mrx5d" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.197741 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/fe99af20-a569-4615-9155-4656a022f118-ovs-socket\") pod \"nmstate-handler-hbj5r\" (UID: \"fe99af20-a569-4615-9155-4656a022f118\") " pod="openshift-nmstate/nmstate-handler-hbj5r" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.197801 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/7b597195-80c1-40e6-8617-0bd8ce0d81bd-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-fmlm4\" (UID: \"7b597195-80c1-40e6-8617-0bd8ce0d81bd\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-fmlm4" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.197855 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqrzc\" (UniqueName: \"kubernetes.io/projected/fe99af20-a569-4615-9155-4656a022f118-kube-api-access-hqrzc\") pod \"nmstate-handler-hbj5r\" (UID: \"fe99af20-a569-4615-9155-4656a022f118\") " pod="openshift-nmstate/nmstate-handler-hbj5r" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.197889 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vj8s8\" (UniqueName: \"kubernetes.io/projected/7b597195-80c1-40e6-8617-0bd8ce0d81bd-kube-api-access-vj8s8\") pod \"nmstate-console-plugin-5874bd7bc5-fmlm4\" (UID: \"7b597195-80c1-40e6-8617-0bd8ce0d81bd\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-fmlm4" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.197894 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/fe99af20-a569-4615-9155-4656a022f118-ovs-socket\") pod \"nmstate-handler-hbj5r\" (UID: \"fe99af20-a569-4615-9155-4656a022f118\") " pod="openshift-nmstate/nmstate-handler-hbj5r" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.197924 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/fe99af20-a569-4615-9155-4656a022f118-dbus-socket\") pod \"nmstate-handler-hbj5r\" (UID: \"fe99af20-a569-4615-9155-4656a022f118\") " pod="openshift-nmstate/nmstate-handler-hbj5r" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.198075 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/fe99af20-a569-4615-9155-4656a022f118-nmstate-lock\") pod \"nmstate-handler-hbj5r\" (UID: \"fe99af20-a569-4615-9155-4656a022f118\") " pod="openshift-nmstate/nmstate-handler-hbj5r" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.198135 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/7b597195-80c1-40e6-8617-0bd8ce0d81bd-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-fmlm4\" (UID: \"7b597195-80c1-40e6-8617-0bd8ce0d81bd\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-fmlm4" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.198132 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/fe99af20-a569-4615-9155-4656a022f118-nmstate-lock\") pod 
\"nmstate-handler-hbj5r\" (UID: \"fe99af20-a569-4615-9155-4656a022f118\") " pod="openshift-nmstate/nmstate-handler-hbj5r" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.198326 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/fe99af20-a569-4615-9155-4656a022f118-dbus-socket\") pod \"nmstate-handler-hbj5r\" (UID: \"fe99af20-a569-4615-9155-4656a022f118\") " pod="openshift-nmstate/nmstate-handler-hbj5r" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.226264 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqrzc\" (UniqueName: \"kubernetes.io/projected/fe99af20-a569-4615-9155-4656a022f118-kube-api-access-hqrzc\") pod \"nmstate-handler-hbj5r\" (UID: \"fe99af20-a569-4615-9155-4656a022f118\") " pod="openshift-nmstate/nmstate-handler-hbj5r" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.228504 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-hbj5r" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.229560 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-5897df5b9-t8l54"] Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.230316 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.263380 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5897df5b9-t8l54"] Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.299087 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/986f3053-9b37-478c-a7ac-7a2996fedef6-console-config\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.299398 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vj8s8\" (UniqueName: \"kubernetes.io/projected/7b597195-80c1-40e6-8617-0bd8ce0d81bd-kube-api-access-vj8s8\") pod \"nmstate-console-plugin-5874bd7bc5-fmlm4\" (UID: \"7b597195-80c1-40e6-8617-0bd8ce0d81bd\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-fmlm4" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.299418 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48nsv\" (UniqueName: \"kubernetes.io/projected/986f3053-9b37-478c-a7ac-7a2996fedef6-kube-api-access-48nsv\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.299450 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/986f3053-9b37-478c-a7ac-7a2996fedef6-console-serving-cert\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.299470 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/7b597195-80c1-40e6-8617-0bd8ce0d81bd-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-fmlm4\" 
(UID: \"7b597195-80c1-40e6-8617-0bd8ce0d81bd\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-fmlm4" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.299509 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/986f3053-9b37-478c-a7ac-7a2996fedef6-service-ca\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.299546 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/986f3053-9b37-478c-a7ac-7a2996fedef6-oauth-serving-cert\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.299586 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/986f3053-9b37-478c-a7ac-7a2996fedef6-console-oauth-config\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.299616 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/986f3053-9b37-478c-a7ac-7a2996fedef6-trusted-ca-bundle\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.299645 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/7b597195-80c1-40e6-8617-0bd8ce0d81bd-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-fmlm4\" (UID: \"7b597195-80c1-40e6-8617-0bd8ce0d81bd\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-fmlm4" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.300276 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/7b597195-80c1-40e6-8617-0bd8ce0d81bd-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-fmlm4\" (UID: \"7b597195-80c1-40e6-8617-0bd8ce0d81bd\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-fmlm4" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.303793 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/7b597195-80c1-40e6-8617-0bd8ce0d81bd-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-fmlm4\" (UID: \"7b597195-80c1-40e6-8617-0bd8ce0d81bd\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-fmlm4" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.320934 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vj8s8\" (UniqueName: \"kubernetes.io/projected/7b597195-80c1-40e6-8617-0bd8ce0d81bd-kube-api-access-vj8s8\") pod \"nmstate-console-plugin-5874bd7bc5-fmlm4\" (UID: \"7b597195-80c1-40e6-8617-0bd8ce0d81bd\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-fmlm4" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.399951 4929 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/986f3053-9b37-478c-a7ac-7a2996fedef6-oauth-serving-cert\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.399987 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/986f3053-9b37-478c-a7ac-7a2996fedef6-service-ca\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.400011 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/986f3053-9b37-478c-a7ac-7a2996fedef6-console-oauth-config\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.400034 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/986f3053-9b37-478c-a7ac-7a2996fedef6-trusted-ca-bundle\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.400074 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/986f3053-9b37-478c-a7ac-7a2996fedef6-console-config\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.400093 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48nsv\" (UniqueName: \"kubernetes.io/projected/986f3053-9b37-478c-a7ac-7a2996fedef6-kube-api-access-48nsv\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.400126 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/986f3053-9b37-478c-a7ac-7a2996fedef6-console-serving-cert\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.402126 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/986f3053-9b37-478c-a7ac-7a2996fedef6-trusted-ca-bundle\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.402408 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/986f3053-9b37-478c-a7ac-7a2996fedef6-console-config\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.403034 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" 
(UniqueName: \"kubernetes.io/configmap/986f3053-9b37-478c-a7ac-7a2996fedef6-oauth-serving-cert\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.403444 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/986f3053-9b37-478c-a7ac-7a2996fedef6-service-ca\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.403741 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/986f3053-9b37-478c-a7ac-7a2996fedef6-console-oauth-config\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.413538 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/986f3053-9b37-478c-a7ac-7a2996fedef6-console-serving-cert\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.441481 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48nsv\" (UniqueName: \"kubernetes.io/projected/986f3053-9b37-478c-a7ac-7a2996fedef6-kube-api-access-48nsv\") pod \"console-5897df5b9-t8l54\" (UID: \"986f3053-9b37-478c-a7ac-7a2996fedef6\") " pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.602645 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/d03ab878-ec20-4ce1-be9d-bad1a6f35b99-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-g7tnr\" (UID: \"d03ab878-ec20-4ce1-be9d-bad1a6f35b99\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-g7tnr" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.605459 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/d03ab878-ec20-4ce1-be9d-bad1a6f35b99-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-g7tnr\" (UID: \"d03ab878-ec20-4ce1-be9d-bad1a6f35b99\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-g7tnr" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.607177 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.619857 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-fmlm4" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.630282 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-mrx5d"] Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.671949 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-hbj5r" event={"ID":"fe99af20-a569-4615-9155-4656a022f118","Type":"ContainerStarted","Data":"f9d4f7db9144913c555d5806a9faba7905591265be09c4932144339202b3098f"} Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.676635 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-mrx5d" event={"ID":"8dfa9ce6-f8c2-4b30-81e9-0bd57c98096b","Type":"ContainerStarted","Data":"4b7bafe44fad58631830e38c7439880b70d7fbaf2098861dc1d4859358cb5465"} Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.792400 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-g7tnr" Nov 22 07:27:03 crc kubenswrapper[4929]: I1122 07:27:03.846570 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-fmlm4"] Nov 22 07:27:03 crc kubenswrapper[4929]: W1122 07:27:03.854589 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7b597195_80c1_40e6_8617_0bd8ce0d81bd.slice/crio-69e405382dc771068d2067cd9ff10fd3736f94f0d3f2f4c7f3b700e780b1d7dd WatchSource:0}: Error finding container 69e405382dc771068d2067cd9ff10fd3736f94f0d3f2f4c7f3b700e780b1d7dd: Status 404 returned error can't find the container with id 69e405382dc771068d2067cd9ff10fd3736f94f0d3f2f4c7f3b700e780b1d7dd Nov 22 07:27:04 crc kubenswrapper[4929]: I1122 07:27:04.022653 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-g7tnr"] Nov 22 07:27:04 crc kubenswrapper[4929]: I1122 07:27:04.027411 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5897df5b9-t8l54"] Nov 22 07:27:04 crc kubenswrapper[4929]: I1122 07:27:04.684429 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5897df5b9-t8l54" event={"ID":"986f3053-9b37-478c-a7ac-7a2996fedef6","Type":"ContainerStarted","Data":"fe97fb7e366dc3c4bce6c615779c5685abdc04e08e3e25d5370f400fc71ffc29"} Nov 22 07:27:04 crc kubenswrapper[4929]: I1122 07:27:04.686372 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-g7tnr" event={"ID":"d03ab878-ec20-4ce1-be9d-bad1a6f35b99","Type":"ContainerStarted","Data":"e47f574cc2eb2ca37368c6f58e9ae31db96520a74370968afd197ef1c26a9b7f"} Nov 22 07:27:04 crc kubenswrapper[4929]: I1122 07:27:04.687975 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-fmlm4" event={"ID":"7b597195-80c1-40e6-8617-0bd8ce0d81bd","Type":"ContainerStarted","Data":"69e405382dc771068d2067cd9ff10fd3736f94f0d3f2f4c7f3b700e780b1d7dd"} Nov 22 07:27:05 crc kubenswrapper[4929]: I1122 07:27:05.694414 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5897df5b9-t8l54" event={"ID":"986f3053-9b37-478c-a7ac-7a2996fedef6","Type":"ContainerStarted","Data":"6402fbfbd79a5681ca1415ecf0635015ca0ddc0eeb363a97d703343436342844"} Nov 22 07:27:05 crc kubenswrapper[4929]: I1122 07:27:05.713706 4929 
Nov 22 07:27:05 crc kubenswrapper[4929]: I1122 07:27:05.713706 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-5897df5b9-t8l54" podStartSLOduration=2.713688276 podStartE2EDuration="2.713688276s" podCreationTimestamp="2025-11-22 07:27:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:27:05.709998042 +0000 UTC m=+962.819452085" watchObservedRunningTime="2025-11-22 07:27:05.713688276 +0000 UTC m=+962.823142289"
Nov 22 07:27:10 crc kubenswrapper[4929]: I1122 07:27:10.735847 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-g7tnr" event={"ID":"d03ab878-ec20-4ce1-be9d-bad1a6f35b99","Type":"ContainerStarted","Data":"b5cd4e81f98911bb434e1b4c038ec45b98f733fadeacf66df0cb1afd91fbb5a0"}
Nov 22 07:27:10 crc kubenswrapper[4929]: I1122 07:27:10.736432 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-g7tnr"
Nov 22 07:27:10 crc kubenswrapper[4929]: I1122 07:27:10.737425 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-fmlm4" event={"ID":"7b597195-80c1-40e6-8617-0bd8ce0d81bd","Type":"ContainerStarted","Data":"35438a6c0e328861f6cba439344109a89200733faded0d73b499b7a5f0cea61c"}
Nov 22 07:27:10 crc kubenswrapper[4929]: I1122 07:27:10.738725 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-hbj5r" event={"ID":"fe99af20-a569-4615-9155-4656a022f118","Type":"ContainerStarted","Data":"e0f8681ae8b2a7e95ddec274ca404fb406af31774384ed01cb902b99fed60062"}
Nov 22 07:27:10 crc kubenswrapper[4929]: I1122 07:27:10.738829 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-hbj5r"
Nov 22 07:27:10 crc kubenswrapper[4929]: I1122 07:27:10.739890 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-mrx5d" event={"ID":"8dfa9ce6-f8c2-4b30-81e9-0bd57c98096b","Type":"ContainerStarted","Data":"40d6c83f360b7bf593a29b792c6b224e32de04d08ebcf79b267ea79c65638720"}
Nov 22 07:27:10 crc kubenswrapper[4929]: I1122 07:27:10.754137 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-g7tnr" podStartSLOduration=2.986033885 podStartE2EDuration="8.754114531s" podCreationTimestamp="2025-11-22 07:27:02 +0000 UTC" firstStartedPulling="2025-11-22 07:27:04.02241068 +0000 UTC m=+961.131864693" lastFinishedPulling="2025-11-22 07:27:09.790491286 +0000 UTC m=+966.899945339" observedRunningTime="2025-11-22 07:27:10.748441897 +0000 UTC m=+967.857895950" watchObservedRunningTime="2025-11-22 07:27:10.754114531 +0000 UTC m=+967.863568584"
Nov 22 07:27:10 crc kubenswrapper[4929]: I1122 07:27:10.768661 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-hbj5r" podStartSLOduration=2.24161374 podStartE2EDuration="8.768644599s" podCreationTimestamp="2025-11-22 07:27:02 +0000 UTC" firstStartedPulling="2025-11-22 07:27:03.248735824 +0000 UTC m=+960.358189837" lastFinishedPulling="2025-11-22 07:27:09.775766653 +0000 UTC m=+966.885220696" observedRunningTime="2025-11-22 07:27:10.767198883 +0000 UTC m=+967.876652916" watchObservedRunningTime="2025-11-22 07:27:10.768644599 +0000 UTC m=+967.878098612"
Nov 22 07:27:10 crc kubenswrapper[4929]: I1122 07:27:10.786894 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-fmlm4" podStartSLOduration=2.869619702 podStartE2EDuration="8.786869862s" podCreationTimestamp="2025-11-22 07:27:02 +0000 UTC" firstStartedPulling="2025-11-22 07:27:03.860529194 +0000 UTC m=+960.969983217" lastFinishedPulling="2025-11-22 07:27:09.777779324 +0000 UTC m=+966.887233377" observedRunningTime="2025-11-22 07:27:10.778753516 +0000 UTC m=+967.888207539" watchObservedRunningTime="2025-11-22 07:27:10.786869862 +0000 UTC m=+967.896323885"
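The pod_startup_latency_tracker entries above report two durations per pod. From the nmstate-webhook values, podStartE2EDuration equals watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration equals that figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling, taken from the monotonic m=+ offsets), which is why the SLO number is the smaller one. The Go sketch below reproduces the logged numbers from those fields; the relationship is inferred from these values, not quoted from kubelet source.

// latencysketch.go — reconstruct the two durations for nmstate-webhook-6b89b748d8-g7tnr.
package main

import "fmt"

func main() {
	// Values copied from the entry above, as seconds past 07:27:00 or monotonic m=+ offsets.
	const (
		creation      = 2.0           // podCreationTimestamp 07:27:02
		observedRun   = 10.754114531  // watchObservedRunningTime 07:27:10.754114531
		pullStartMono = 961.131864693 // firstStartedPulling m=+ offset
		pullEndMono   = 966.899945339 // lastFinishedPulling m=+ offset
	)
	e2e := observedRun - creation       // podStartE2EDuration
	pull := pullEndMono - pullStartMono // time spent pulling images
	slo := e2e - pull                   // podStartSLOduration: startup excluding image pulls
	// Prints e2e=8.754114531s pull=5.768080646s slo=2.986033885s (up to float rounding),
	// matching podStartE2EDuration and podStartSLOduration in the log entry above.
	fmt.Printf("e2e=%.9fs pull=%.9fs slo=%.9fs\n", e2e, pull, slo)
}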
duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-fmlm4" podStartSLOduration=2.869619702 podStartE2EDuration="8.786869862s" podCreationTimestamp="2025-11-22 07:27:02 +0000 UTC" firstStartedPulling="2025-11-22 07:27:03.860529194 +0000 UTC m=+960.969983217" lastFinishedPulling="2025-11-22 07:27:09.777779324 +0000 UTC m=+966.887233377" observedRunningTime="2025-11-22 07:27:10.778753516 +0000 UTC m=+967.888207539" watchObservedRunningTime="2025-11-22 07:27:10.786869862 +0000 UTC m=+967.896323885" Nov 22 07:27:13 crc kubenswrapper[4929]: I1122 07:27:13.608447 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:13 crc kubenswrapper[4929]: I1122 07:27:13.611273 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:13 crc kubenswrapper[4929]: I1122 07:27:13.617101 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:13 crc kubenswrapper[4929]: I1122 07:27:13.764815 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-5897df5b9-t8l54" Nov 22 07:27:13 crc kubenswrapper[4929]: I1122 07:27:13.855622 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-tqwhs"] Nov 22 07:27:17 crc kubenswrapper[4929]: I1122 07:27:17.794860 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-mrx5d" event={"ID":"8dfa9ce6-f8c2-4b30-81e9-0bd57c98096b","Type":"ContainerStarted","Data":"a1f45589d9d99d43d615e413734ee320346e7354e9a74743f5cf40a6ebd47c6a"} Nov 22 07:27:17 crc kubenswrapper[4929]: I1122 07:27:17.816999 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-mrx5d" podStartSLOduration=1.989706322 podStartE2EDuration="15.816972762s" podCreationTimestamp="2025-11-22 07:27:02 +0000 UTC" firstStartedPulling="2025-11-22 07:27:03.642530694 +0000 UTC m=+960.751984707" lastFinishedPulling="2025-11-22 07:27:17.469797084 +0000 UTC m=+974.579251147" observedRunningTime="2025-11-22 07:27:17.814085738 +0000 UTC m=+974.923539751" watchObservedRunningTime="2025-11-22 07:27:17.816972762 +0000 UTC m=+974.926426815" Nov 22 07:27:18 crc kubenswrapper[4929]: I1122 07:27:18.251579 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-hbj5r" Nov 22 07:27:23 crc kubenswrapper[4929]: I1122 07:27:23.798030 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-g7tnr" Nov 22 07:27:38 crc kubenswrapper[4929]: I1122 07:27:38.913679 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-tqwhs" podUID="de2ba535-d661-453d-b4cd-19c6e7628b0c" containerName="console" containerID="cri-o://1903a286d770690a0bbdbbd0c4400b018a6a501df140bb430e9e265621608bbe" gracePeriod=15 Nov 22 07:27:39 crc kubenswrapper[4929]: I1122 07:27:39.216626 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-tqwhs_de2ba535-d661-453d-b4cd-19c6e7628b0c/console/0.log" Nov 22 07:27:39 crc kubenswrapper[4929]: I1122 07:27:39.216965 4929 generic.go:334] "Generic (PLEG): container finished" podID="de2ba535-d661-453d-b4cd-19c6e7628b0c" 
containerID="1903a286d770690a0bbdbbd0c4400b018a6a501df140bb430e9e265621608bbe" exitCode=2 Nov 22 07:27:39 crc kubenswrapper[4929]: I1122 07:27:39.216998 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-tqwhs" event={"ID":"de2ba535-d661-453d-b4cd-19c6e7628b0c","Type":"ContainerDied","Data":"1903a286d770690a0bbdbbd0c4400b018a6a501df140bb430e9e265621608bbe"} Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.390938 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-tqwhs_de2ba535-d661-453d-b4cd-19c6e7628b0c/console/0.log" Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.391513 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-tqwhs" Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.454589 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-oauth-serving-cert\") pod \"de2ba535-d661-453d-b4cd-19c6e7628b0c\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.454911 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/de2ba535-d661-453d-b4cd-19c6e7628b0c-console-oauth-config\") pod \"de2ba535-d661-453d-b4cd-19c6e7628b0c\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.454977 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-service-ca\") pod \"de2ba535-d661-453d-b4cd-19c6e7628b0c\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.455009 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-trusted-ca-bundle\") pod \"de2ba535-d661-453d-b4cd-19c6e7628b0c\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.455046 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/de2ba535-d661-453d-b4cd-19c6e7628b0c-console-serving-cert\") pod \"de2ba535-d661-453d-b4cd-19c6e7628b0c\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.455067 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-console-config\") pod \"de2ba535-d661-453d-b4cd-19c6e7628b0c\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.455130 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rrx4h\" (UniqueName: \"kubernetes.io/projected/de2ba535-d661-453d-b4cd-19c6e7628b0c-kube-api-access-rrx4h\") pod \"de2ba535-d661-453d-b4cd-19c6e7628b0c\" (UID: \"de2ba535-d661-453d-b4cd-19c6e7628b0c\") " Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.455831 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-console-config" 
(OuterVolumeSpecName: "console-config") pod "de2ba535-d661-453d-b4cd-19c6e7628b0c" (UID: "de2ba535-d661-453d-b4cd-19c6e7628b0c"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.455964 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "de2ba535-d661-453d-b4cd-19c6e7628b0c" (UID: "de2ba535-d661-453d-b4cd-19c6e7628b0c"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.456013 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "de2ba535-d661-453d-b4cd-19c6e7628b0c" (UID: "de2ba535-d661-453d-b4cd-19c6e7628b0c"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.456425 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-service-ca" (OuterVolumeSpecName: "service-ca") pod "de2ba535-d661-453d-b4cd-19c6e7628b0c" (UID: "de2ba535-d661-453d-b4cd-19c6e7628b0c"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.468781 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de2ba535-d661-453d-b4cd-19c6e7628b0c-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "de2ba535-d661-453d-b4cd-19c6e7628b0c" (UID: "de2ba535-d661-453d-b4cd-19c6e7628b0c"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.469031 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de2ba535-d661-453d-b4cd-19c6e7628b0c-kube-api-access-rrx4h" (OuterVolumeSpecName: "kube-api-access-rrx4h") pod "de2ba535-d661-453d-b4cd-19c6e7628b0c" (UID: "de2ba535-d661-453d-b4cd-19c6e7628b0c"). InnerVolumeSpecName "kube-api-access-rrx4h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.469600 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de2ba535-d661-453d-b4cd-19c6e7628b0c-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "de2ba535-d661-453d-b4cd-19c6e7628b0c" (UID: "de2ba535-d661-453d-b4cd-19c6e7628b0c"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.556440 4929 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.556470 4929 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/de2ba535-d661-453d-b4cd-19c6e7628b0c-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.556479 4929 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.556491 4929 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.556499 4929 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/de2ba535-d661-453d-b4cd-19c6e7628b0c-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.556507 4929 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/de2ba535-d661-453d-b4cd-19c6e7628b0c-console-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:27:40 crc kubenswrapper[4929]: I1122 07:27:40.556515 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rrx4h\" (UniqueName: \"kubernetes.io/projected/de2ba535-d661-453d-b4cd-19c6e7628b0c-kube-api-access-rrx4h\") on node \"crc\" DevicePath \"\"" Nov 22 07:27:41 crc kubenswrapper[4929]: I1122 07:27:41.233570 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-tqwhs_de2ba535-d661-453d-b4cd-19c6e7628b0c/console/0.log" Nov 22 07:27:41 crc kubenswrapper[4929]: I1122 07:27:41.233911 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-tqwhs" event={"ID":"de2ba535-d661-453d-b4cd-19c6e7628b0c","Type":"ContainerDied","Data":"722fcf86a8cdca52dffd33cc1a24d418605b36767e96a4d67c830613d14dbfd2"} Nov 22 07:27:41 crc kubenswrapper[4929]: I1122 07:27:41.233951 4929 scope.go:117] "RemoveContainer" containerID="1903a286d770690a0bbdbbd0c4400b018a6a501df140bb430e9e265621608bbe" Nov 22 07:27:41 crc kubenswrapper[4929]: I1122 07:27:41.233996 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-tqwhs" Nov 22 07:27:41 crc kubenswrapper[4929]: I1122 07:27:41.276960 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-tqwhs"] Nov 22 07:27:41 crc kubenswrapper[4929]: I1122 07:27:41.280391 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-tqwhs"] Nov 22 07:27:41 crc kubenswrapper[4929]: I1122 07:27:41.970903 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de2ba535-d661-453d-b4cd-19c6e7628b0c" path="/var/lib/kubelet/pods/de2ba535-d661-453d-b4cd-19c6e7628b0c/volumes" Nov 22 07:27:47 crc kubenswrapper[4929]: I1122 07:27:47.551967 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r"] Nov 22 07:27:47 crc kubenswrapper[4929]: E1122 07:27:47.553088 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de2ba535-d661-453d-b4cd-19c6e7628b0c" containerName="console" Nov 22 07:27:47 crc kubenswrapper[4929]: I1122 07:27:47.553115 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="de2ba535-d661-453d-b4cd-19c6e7628b0c" containerName="console" Nov 22 07:27:47 crc kubenswrapper[4929]: I1122 07:27:47.553340 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="de2ba535-d661-453d-b4cd-19c6e7628b0c" containerName="console" Nov 22 07:27:47 crc kubenswrapper[4929]: I1122 07:27:47.554897 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r" Nov 22 07:27:47 crc kubenswrapper[4929]: I1122 07:27:47.557613 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 22 07:27:47 crc kubenswrapper[4929]: I1122 07:27:47.559864 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r"] Nov 22 07:27:47 crc kubenswrapper[4929]: I1122 07:27:47.659469 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/17709bed-38bb-4897-8dcd-86c17b6763ec-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r\" (UID: \"17709bed-38bb-4897-8dcd-86c17b6763ec\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r" Nov 22 07:27:47 crc kubenswrapper[4929]: I1122 07:27:47.659586 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttfgs\" (UniqueName: \"kubernetes.io/projected/17709bed-38bb-4897-8dcd-86c17b6763ec-kube-api-access-ttfgs\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r\" (UID: \"17709bed-38bb-4897-8dcd-86c17b6763ec\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r" Nov 22 07:27:47 crc kubenswrapper[4929]: I1122 07:27:47.659644 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/17709bed-38bb-4897-8dcd-86c17b6763ec-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r\" (UID: \"17709bed-38bb-4897-8dcd-86c17b6763ec\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r" Nov 22 07:27:47 crc kubenswrapper[4929]: 
I1122 07:27:47.761048 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/17709bed-38bb-4897-8dcd-86c17b6763ec-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r\" (UID: \"17709bed-38bb-4897-8dcd-86c17b6763ec\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r" Nov 22 07:27:47 crc kubenswrapper[4929]: I1122 07:27:47.761140 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/17709bed-38bb-4897-8dcd-86c17b6763ec-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r\" (UID: \"17709bed-38bb-4897-8dcd-86c17b6763ec\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r" Nov 22 07:27:47 crc kubenswrapper[4929]: I1122 07:27:47.761177 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttfgs\" (UniqueName: \"kubernetes.io/projected/17709bed-38bb-4897-8dcd-86c17b6763ec-kube-api-access-ttfgs\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r\" (UID: \"17709bed-38bb-4897-8dcd-86c17b6763ec\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r" Nov 22 07:27:47 crc kubenswrapper[4929]: I1122 07:27:47.761604 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/17709bed-38bb-4897-8dcd-86c17b6763ec-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r\" (UID: \"17709bed-38bb-4897-8dcd-86c17b6763ec\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r" Nov 22 07:27:47 crc kubenswrapper[4929]: I1122 07:27:47.761620 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/17709bed-38bb-4897-8dcd-86c17b6763ec-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r\" (UID: \"17709bed-38bb-4897-8dcd-86c17b6763ec\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r" Nov 22 07:27:47 crc kubenswrapper[4929]: I1122 07:27:47.787419 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttfgs\" (UniqueName: \"kubernetes.io/projected/17709bed-38bb-4897-8dcd-86c17b6763ec-kube-api-access-ttfgs\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r\" (UID: \"17709bed-38bb-4897-8dcd-86c17b6763ec\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r" Nov 22 07:27:47 crc kubenswrapper[4929]: I1122 07:27:47.886502 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r" Nov 22 07:27:48 crc kubenswrapper[4929]: I1122 07:27:48.105231 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r"] Nov 22 07:27:48 crc kubenswrapper[4929]: I1122 07:27:48.282621 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r" event={"ID":"17709bed-38bb-4897-8dcd-86c17b6763ec","Type":"ContainerStarted","Data":"3273bb14312fb2ab008feb8a32ddc16b6d12c5b7afa499eac890cc5cc8e34922"} Nov 22 07:27:48 crc kubenswrapper[4929]: I1122 07:27:48.282677 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r" event={"ID":"17709bed-38bb-4897-8dcd-86c17b6763ec","Type":"ContainerStarted","Data":"677d0aba93b34ac35bc54d1638eab1c5a3dacd89d44095039438209eaf6f943a"} Nov 22 07:27:49 crc kubenswrapper[4929]: I1122 07:27:49.288546 4929 generic.go:334] "Generic (PLEG): container finished" podID="17709bed-38bb-4897-8dcd-86c17b6763ec" containerID="3273bb14312fb2ab008feb8a32ddc16b6d12c5b7afa499eac890cc5cc8e34922" exitCode=0 Nov 22 07:27:49 crc kubenswrapper[4929]: I1122 07:27:49.288633 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r" event={"ID":"17709bed-38bb-4897-8dcd-86c17b6763ec","Type":"ContainerDied","Data":"3273bb14312fb2ab008feb8a32ddc16b6d12c5b7afa499eac890cc5cc8e34922"} Nov 22 07:27:51 crc kubenswrapper[4929]: I1122 07:27:51.302856 4929 generic.go:334] "Generic (PLEG): container finished" podID="17709bed-38bb-4897-8dcd-86c17b6763ec" containerID="c6c63e941d6f2080d330390eb606dfcf7cefb6fc8495c01e0c8fd8cd086e2b6a" exitCode=0 Nov 22 07:27:51 crc kubenswrapper[4929]: I1122 07:27:51.302919 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r" event={"ID":"17709bed-38bb-4897-8dcd-86c17b6763ec","Type":"ContainerDied","Data":"c6c63e941d6f2080d330390eb606dfcf7cefb6fc8495c01e0c8fd8cd086e2b6a"} Nov 22 07:27:52 crc kubenswrapper[4929]: I1122 07:27:52.315227 4929 generic.go:334] "Generic (PLEG): container finished" podID="17709bed-38bb-4897-8dcd-86c17b6763ec" containerID="4312178f1c05d14eed716c6e50c8550a64c51332fbbc8bf17ff6d1dbe62e2f62" exitCode=0 Nov 22 07:27:52 crc kubenswrapper[4929]: I1122 07:27:52.315342 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r" event={"ID":"17709bed-38bb-4897-8dcd-86c17b6763ec","Type":"ContainerDied","Data":"4312178f1c05d14eed716c6e50c8550a64c51332fbbc8bf17ff6d1dbe62e2f62"} Nov 22 07:27:53 crc kubenswrapper[4929]: I1122 07:27:53.602895 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r" Nov 22 07:27:53 crc kubenswrapper[4929]: I1122 07:27:53.739148 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/17709bed-38bb-4897-8dcd-86c17b6763ec-util\") pod \"17709bed-38bb-4897-8dcd-86c17b6763ec\" (UID: \"17709bed-38bb-4897-8dcd-86c17b6763ec\") " Nov 22 07:27:53 crc kubenswrapper[4929]: I1122 07:27:53.739340 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttfgs\" (UniqueName: \"kubernetes.io/projected/17709bed-38bb-4897-8dcd-86c17b6763ec-kube-api-access-ttfgs\") pod \"17709bed-38bb-4897-8dcd-86c17b6763ec\" (UID: \"17709bed-38bb-4897-8dcd-86c17b6763ec\") " Nov 22 07:27:53 crc kubenswrapper[4929]: I1122 07:27:53.739475 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/17709bed-38bb-4897-8dcd-86c17b6763ec-bundle\") pod \"17709bed-38bb-4897-8dcd-86c17b6763ec\" (UID: \"17709bed-38bb-4897-8dcd-86c17b6763ec\") " Nov 22 07:27:53 crc kubenswrapper[4929]: I1122 07:27:53.740810 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17709bed-38bb-4897-8dcd-86c17b6763ec-bundle" (OuterVolumeSpecName: "bundle") pod "17709bed-38bb-4897-8dcd-86c17b6763ec" (UID: "17709bed-38bb-4897-8dcd-86c17b6763ec"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:27:53 crc kubenswrapper[4929]: I1122 07:27:53.746576 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17709bed-38bb-4897-8dcd-86c17b6763ec-kube-api-access-ttfgs" (OuterVolumeSpecName: "kube-api-access-ttfgs") pod "17709bed-38bb-4897-8dcd-86c17b6763ec" (UID: "17709bed-38bb-4897-8dcd-86c17b6763ec"). InnerVolumeSpecName "kube-api-access-ttfgs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:27:53 crc kubenswrapper[4929]: I1122 07:27:53.841626 4929 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/17709bed-38bb-4897-8dcd-86c17b6763ec-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:27:53 crc kubenswrapper[4929]: I1122 07:27:53.841670 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttfgs\" (UniqueName: \"kubernetes.io/projected/17709bed-38bb-4897-8dcd-86c17b6763ec-kube-api-access-ttfgs\") on node \"crc\" DevicePath \"\"" Nov 22 07:27:54 crc kubenswrapper[4929]: I1122 07:27:54.292224 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17709bed-38bb-4897-8dcd-86c17b6763ec-util" (OuterVolumeSpecName: "util") pod "17709bed-38bb-4897-8dcd-86c17b6763ec" (UID: "17709bed-38bb-4897-8dcd-86c17b6763ec"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:27:54 crc kubenswrapper[4929]: I1122 07:27:54.331438 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r" event={"ID":"17709bed-38bb-4897-8dcd-86c17b6763ec","Type":"ContainerDied","Data":"677d0aba93b34ac35bc54d1638eab1c5a3dacd89d44095039438209eaf6f943a"} Nov 22 07:27:54 crc kubenswrapper[4929]: I1122 07:27:54.331484 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="677d0aba93b34ac35bc54d1638eab1c5a3dacd89d44095039438209eaf6f943a" Nov 22 07:27:54 crc kubenswrapper[4929]: I1122 07:27:54.331528 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r" Nov 22 07:27:54 crc kubenswrapper[4929]: I1122 07:27:54.349506 4929 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/17709bed-38bb-4897-8dcd-86c17b6763ec-util\") on node \"crc\" DevicePath \"\"" Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.584915 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-7bc5fd5f85-qf654"] Nov 22 07:28:02 crc kubenswrapper[4929]: E1122 07:28:02.585703 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17709bed-38bb-4897-8dcd-86c17b6763ec" containerName="extract" Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.585716 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="17709bed-38bb-4897-8dcd-86c17b6763ec" containerName="extract" Nov 22 07:28:02 crc kubenswrapper[4929]: E1122 07:28:02.585732 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17709bed-38bb-4897-8dcd-86c17b6763ec" containerName="pull" Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.585737 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="17709bed-38bb-4897-8dcd-86c17b6763ec" containerName="pull" Nov 22 07:28:02 crc kubenswrapper[4929]: E1122 07:28:02.585747 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17709bed-38bb-4897-8dcd-86c17b6763ec" containerName="util" Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.585753 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="17709bed-38bb-4897-8dcd-86c17b6763ec" containerName="util" Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.585845 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="17709bed-38bb-4897-8dcd-86c17b6763ec" containerName="extract" Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.586225 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7bc5fd5f85-qf654" Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.588427 4929 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.589131 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.589235 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.589360 4929 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-ccmsq" Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.590342 4929 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.605066 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7bc5fd5f85-qf654"] Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.654363 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8ce2ff51-c2c3-4e0d-b6fc-73259af320ab-webhook-cert\") pod \"metallb-operator-controller-manager-7bc5fd5f85-qf654\" (UID: \"8ce2ff51-c2c3-4e0d-b6fc-73259af320ab\") " pod="metallb-system/metallb-operator-controller-manager-7bc5fd5f85-qf654" Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.654487 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8ce2ff51-c2c3-4e0d-b6fc-73259af320ab-apiservice-cert\") pod \"metallb-operator-controller-manager-7bc5fd5f85-qf654\" (UID: \"8ce2ff51-c2c3-4e0d-b6fc-73259af320ab\") " pod="metallb-system/metallb-operator-controller-manager-7bc5fd5f85-qf654" Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.654524 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvsdp\" (UniqueName: \"kubernetes.io/projected/8ce2ff51-c2c3-4e0d-b6fc-73259af320ab-kube-api-access-tvsdp\") pod \"metallb-operator-controller-manager-7bc5fd5f85-qf654\" (UID: \"8ce2ff51-c2c3-4e0d-b6fc-73259af320ab\") " pod="metallb-system/metallb-operator-controller-manager-7bc5fd5f85-qf654" Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.755698 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8ce2ff51-c2c3-4e0d-b6fc-73259af320ab-apiservice-cert\") pod \"metallb-operator-controller-manager-7bc5fd5f85-qf654\" (UID: \"8ce2ff51-c2c3-4e0d-b6fc-73259af320ab\") " pod="metallb-system/metallb-operator-controller-manager-7bc5fd5f85-qf654" Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.755757 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvsdp\" (UniqueName: \"kubernetes.io/projected/8ce2ff51-c2c3-4e0d-b6fc-73259af320ab-kube-api-access-tvsdp\") pod \"metallb-operator-controller-manager-7bc5fd5f85-qf654\" (UID: \"8ce2ff51-c2c3-4e0d-b6fc-73259af320ab\") " pod="metallb-system/metallb-operator-controller-manager-7bc5fd5f85-qf654" Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.755803 
Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.761065 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8ce2ff51-c2c3-4e0d-b6fc-73259af320ab-webhook-cert\") pod \"metallb-operator-controller-manager-7bc5fd5f85-qf654\" (UID: \"8ce2ff51-c2c3-4e0d-b6fc-73259af320ab\") " pod="metallb-system/metallb-operator-controller-manager-7bc5fd5f85-qf654"
Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.763949 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8ce2ff51-c2c3-4e0d-b6fc-73259af320ab-apiservice-cert\") pod \"metallb-operator-controller-manager-7bc5fd5f85-qf654\" (UID: \"8ce2ff51-c2c3-4e0d-b6fc-73259af320ab\") " pod="metallb-system/metallb-operator-controller-manager-7bc5fd5f85-qf654"
Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.787607 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvsdp\" (UniqueName: \"kubernetes.io/projected/8ce2ff51-c2c3-4e0d-b6fc-73259af320ab-kube-api-access-tvsdp\") pod \"metallb-operator-controller-manager-7bc5fd5f85-qf654\" (UID: \"8ce2ff51-c2c3-4e0d-b6fc-73259af320ab\") " pod="metallb-system/metallb-operator-controller-manager-7bc5fd5f85-qf654"
Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.902340 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7bc5fd5f85-qf654"
Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.941966 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7"]
Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.942925 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7"
Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.946075 4929 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-qjq4h"
Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.946106 4929 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert"
Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.946080 4929 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Nov 22 07:28:02 crc kubenswrapper[4929]: I1122 07:28:02.958136 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7"]
Nov 22 07:28:03 crc kubenswrapper[4929]: I1122 07:28:03.058269 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197-apiservice-cert\") pod \"metallb-operator-webhook-server-9d7d9d8fc-9m2z7\" (UID: \"8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197\") " pod="metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7"
Nov 22 07:28:03 crc kubenswrapper[4929]: I1122 07:28:03.058325 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197-webhook-cert\") pod \"metallb-operator-webhook-server-9d7d9d8fc-9m2z7\" (UID: \"8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197\") " pod="metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7"
Nov 22 07:28:03 crc kubenswrapper[4929]: I1122 07:28:03.058360 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhpfb\" (UniqueName: \"kubernetes.io/projected/8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197-kube-api-access-qhpfb\") pod \"metallb-operator-webhook-server-9d7d9d8fc-9m2z7\" (UID: \"8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197\") " pod="metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7"
Nov 22 07:28:03 crc kubenswrapper[4929]: I1122 07:28:03.137596 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7bc5fd5f85-qf654"]
Nov 22 07:28:03 crc kubenswrapper[4929]: W1122 07:28:03.148004 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8ce2ff51_c2c3_4e0d_b6fc_73259af320ab.slice/crio-6f1e5a784a78035eaba3d4aeaabc3fde5d41a58e206aac9709891f3fd58bd2b0 WatchSource:0}: Error finding container 6f1e5a784a78035eaba3d4aeaabc3fde5d41a58e206aac9709891f3fd58bd2b0: Status 404 returned error can't find the container with id 6f1e5a784a78035eaba3d4aeaabc3fde5d41a58e206aac9709891f3fd58bd2b0
Nov 22 07:28:03 crc kubenswrapper[4929]: I1122 07:28:03.160097 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197-apiservice-cert\") pod \"metallb-operator-webhook-server-9d7d9d8fc-9m2z7\" (UID: \"8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197\") " pod="metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7"
Nov 22 07:28:03 crc kubenswrapper[4929]: I1122 07:28:03.160157 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197-webhook-cert\") pod \"metallb-operator-webhook-server-9d7d9d8fc-9m2z7\" (UID: \"8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197\") " pod="metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7"
\"kubernetes.io/secret/8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197-webhook-cert\") pod \"metallb-operator-webhook-server-9d7d9d8fc-9m2z7\" (UID: \"8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197\") " pod="metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7" Nov 22 07:28:03 crc kubenswrapper[4929]: I1122 07:28:03.160247 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhpfb\" (UniqueName: \"kubernetes.io/projected/8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197-kube-api-access-qhpfb\") pod \"metallb-operator-webhook-server-9d7d9d8fc-9m2z7\" (UID: \"8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197\") " pod="metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7" Nov 22 07:28:03 crc kubenswrapper[4929]: I1122 07:28:03.165710 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197-apiservice-cert\") pod \"metallb-operator-webhook-server-9d7d9d8fc-9m2z7\" (UID: \"8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197\") " pod="metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7" Nov 22 07:28:03 crc kubenswrapper[4929]: I1122 07:28:03.166299 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197-webhook-cert\") pod \"metallb-operator-webhook-server-9d7d9d8fc-9m2z7\" (UID: \"8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197\") " pod="metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7" Nov 22 07:28:03 crc kubenswrapper[4929]: I1122 07:28:03.180168 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhpfb\" (UniqueName: \"kubernetes.io/projected/8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197-kube-api-access-qhpfb\") pod \"metallb-operator-webhook-server-9d7d9d8fc-9m2z7\" (UID: \"8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197\") " pod="metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7" Nov 22 07:28:03 crc kubenswrapper[4929]: I1122 07:28:03.268011 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7" Nov 22 07:28:03 crc kubenswrapper[4929]: I1122 07:28:03.386926 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7bc5fd5f85-qf654" event={"ID":"8ce2ff51-c2c3-4e0d-b6fc-73259af320ab","Type":"ContainerStarted","Data":"6f1e5a784a78035eaba3d4aeaabc3fde5d41a58e206aac9709891f3fd58bd2b0"} Nov 22 07:28:03 crc kubenswrapper[4929]: I1122 07:28:03.515081 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7"] Nov 22 07:28:03 crc kubenswrapper[4929]: W1122 07:28:03.521153 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a3d08f2_6eea_44d4_9f2a_dd1bc67f4197.slice/crio-10cfca64209207aab5390393e0ff754e2d0d63f35f6f999daab35842113558dd WatchSource:0}: Error finding container 10cfca64209207aab5390393e0ff754e2d0d63f35f6f999daab35842113558dd: Status 404 returned error can't find the container with id 10cfca64209207aab5390393e0ff754e2d0d63f35f6f999daab35842113558dd Nov 22 07:28:04 crc kubenswrapper[4929]: I1122 07:28:04.395272 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7" event={"ID":"8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197","Type":"ContainerStarted","Data":"10cfca64209207aab5390393e0ff754e2d0d63f35f6f999daab35842113558dd"} Nov 22 07:28:10 crc kubenswrapper[4929]: I1122 07:28:10.430117 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7" event={"ID":"8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197","Type":"ContainerStarted","Data":"b0c4ca3c8dcc1a63fa467b69e6f5836b41dd3cdd9b27ed2005e1b36562436494"} Nov 22 07:28:10 crc kubenswrapper[4929]: I1122 07:28:10.430712 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7" Nov 22 07:28:10 crc kubenswrapper[4929]: I1122 07:28:10.431444 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7bc5fd5f85-qf654" event={"ID":"8ce2ff51-c2c3-4e0d-b6fc-73259af320ab","Type":"ContainerStarted","Data":"a118dd3eee5ed06948692b5dc110df70dc86a4bcc7bbaff1a23b7d2e55e4ed71"} Nov 22 07:28:10 crc kubenswrapper[4929]: I1122 07:28:10.431554 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-7bc5fd5f85-qf654" Nov 22 07:28:10 crc kubenswrapper[4929]: I1122 07:28:10.448518 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7" podStartSLOduration=2.4413117890000002 podStartE2EDuration="8.448484739s" podCreationTimestamp="2025-11-22 07:28:02 +0000 UTC" firstStartedPulling="2025-11-22 07:28:03.524754737 +0000 UTC m=+1020.634208750" lastFinishedPulling="2025-11-22 07:28:09.531927687 +0000 UTC m=+1026.641381700" observedRunningTime="2025-11-22 07:28:10.446753845 +0000 UTC m=+1027.556207858" watchObservedRunningTime="2025-11-22 07:28:10.448484739 +0000 UTC m=+1027.557938752" Nov 22 07:28:10 crc kubenswrapper[4929]: I1122 07:28:10.472336 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-7bc5fd5f85-qf654" podStartSLOduration=2.108007893 podStartE2EDuration="8.472314553s" 
podCreationTimestamp="2025-11-22 07:28:02 +0000 UTC" firstStartedPulling="2025-11-22 07:28:03.150320258 +0000 UTC m=+1020.259774271" lastFinishedPulling="2025-11-22 07:28:09.514626878 +0000 UTC m=+1026.624080931" observedRunningTime="2025-11-22 07:28:10.468504177 +0000 UTC m=+1027.577958190" watchObservedRunningTime="2025-11-22 07:28:10.472314553 +0000 UTC m=+1027.581768566" Nov 22 07:28:18 crc kubenswrapper[4929]: I1122 07:28:18.595106 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:28:18 crc kubenswrapper[4929]: I1122 07:28:18.595733 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:28:23 crc kubenswrapper[4929]: I1122 07:28:23.272168 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-9d7d9d8fc-9m2z7" Nov 22 07:28:42 crc kubenswrapper[4929]: I1122 07:28:42.906915 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-7bc5fd5f85-qf654" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.744442 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-4vf85"] Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.747861 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.749604 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.749715 4929 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-799bh" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.750557 4929 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.756151 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-ph794"] Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.757197 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ph794" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.759996 4929 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.776811 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-ph794"] Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.782877 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/5419f653-54d8-407c-bf43-0258a3b4451c-metrics\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.783038 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/5419f653-54d8-407c-bf43-0258a3b4451c-reloader\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.783135 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/5419f653-54d8-407c-bf43-0258a3b4451c-frr-sockets\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.783260 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/636d3dfa-e47c-487c-a23f-4c4b8f65a69c-cert\") pod \"frr-k8s-webhook-server-6998585d5-ph794\" (UID: \"636d3dfa-e47c-487c-a23f-4c4b8f65a69c\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-ph794" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.783367 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59xnn\" (UniqueName: \"kubernetes.io/projected/636d3dfa-e47c-487c-a23f-4c4b8f65a69c-kube-api-access-59xnn\") pod \"frr-k8s-webhook-server-6998585d5-ph794\" (UID: \"636d3dfa-e47c-487c-a23f-4c4b8f65a69c\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-ph794" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.783481 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/5419f653-54d8-407c-bf43-0258a3b4451c-frr-conf\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.783609 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/5419f653-54d8-407c-bf43-0258a3b4451c-frr-startup\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.783728 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5419f653-54d8-407c-bf43-0258a3b4451c-metrics-certs\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 
07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.783862 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8s79\" (UniqueName: \"kubernetes.io/projected/5419f653-54d8-407c-bf43-0258a3b4451c-kube-api-access-v8s79\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.822668 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-f8zsn"] Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.823601 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-f8zsn" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.824917 4929 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-5hl2t" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.825298 4929 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.825988 4929 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.826745 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.834052 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-hpt9g"] Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.834920 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-hpt9g" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.836276 4929 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.858063 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-hpt9g"] Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.884297 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5419f653-54d8-407c-bf43-0258a3b4451c-metrics-certs\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.884349 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-metallb-excludel2\") pod \"speaker-f8zsn\" (UID: \"e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf\") " pod="metallb-system/speaker-f8zsn" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.884374 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8s79\" (UniqueName: \"kubernetes.io/projected/5419f653-54d8-407c-bf43-0258a3b4451c-kube-api-access-v8s79\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.884402 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/129ee7fb-2597-475d-882e-8064f3e4b4fc-metrics-certs\") pod \"controller-6c7b4b5f48-hpt9g\" (UID: 
\"129ee7fb-2597-475d-882e-8064f3e4b4fc\") " pod="metallb-system/controller-6c7b4b5f48-hpt9g" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.884422 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7vpr\" (UniqueName: \"kubernetes.io/projected/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-kube-api-access-g7vpr\") pod \"speaker-f8zsn\" (UID: \"e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf\") " pod="metallb-system/speaker-f8zsn" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.884444 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nt9wc\" (UniqueName: \"kubernetes.io/projected/129ee7fb-2597-475d-882e-8064f3e4b4fc-kube-api-access-nt9wc\") pod \"controller-6c7b4b5f48-hpt9g\" (UID: \"129ee7fb-2597-475d-882e-8064f3e4b4fc\") " pod="metallb-system/controller-6c7b4b5f48-hpt9g" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.884587 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/5419f653-54d8-407c-bf43-0258a3b4451c-metrics\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.884609 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/5419f653-54d8-407c-bf43-0258a3b4451c-reloader\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.884626 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/5419f653-54d8-407c-bf43-0258a3b4451c-frr-sockets\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.884646 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/636d3dfa-e47c-487c-a23f-4c4b8f65a69c-cert\") pod \"frr-k8s-webhook-server-6998585d5-ph794\" (UID: \"636d3dfa-e47c-487c-a23f-4c4b8f65a69c\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-ph794" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.884664 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-metrics-certs\") pod \"speaker-f8zsn\" (UID: \"e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf\") " pod="metallb-system/speaker-f8zsn" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.884683 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59xnn\" (UniqueName: \"kubernetes.io/projected/636d3dfa-e47c-487c-a23f-4c4b8f65a69c-kube-api-access-59xnn\") pod \"frr-k8s-webhook-server-6998585d5-ph794\" (UID: \"636d3dfa-e47c-487c-a23f-4c4b8f65a69c\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-ph794" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.884713 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/5419f653-54d8-407c-bf43-0258a3b4451c-frr-conf\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc 
kubenswrapper[4929]: I1122 07:28:44.884737 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-memberlist\") pod \"speaker-f8zsn\" (UID: \"e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf\") " pod="metallb-system/speaker-f8zsn" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.884772 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/5419f653-54d8-407c-bf43-0258a3b4451c-frr-startup\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.884795 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/129ee7fb-2597-475d-882e-8064f3e4b4fc-cert\") pod \"controller-6c7b4b5f48-hpt9g\" (UID: \"129ee7fb-2597-475d-882e-8064f3e4b4fc\") " pod="metallb-system/controller-6c7b4b5f48-hpt9g" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.885149 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/5419f653-54d8-407c-bf43-0258a3b4451c-metrics\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.885365 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/5419f653-54d8-407c-bf43-0258a3b4451c-reloader\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.885502 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/5419f653-54d8-407c-bf43-0258a3b4451c-frr-conf\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.885878 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/5419f653-54d8-407c-bf43-0258a3b4451c-frr-startup\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.886116 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/5419f653-54d8-407c-bf43-0258a3b4451c-frr-sockets\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.889960 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/636d3dfa-e47c-487c-a23f-4c4b8f65a69c-cert\") pod \"frr-k8s-webhook-server-6998585d5-ph794\" (UID: \"636d3dfa-e47c-487c-a23f-4c4b8f65a69c\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-ph794" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.901919 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59xnn\" (UniqueName: \"kubernetes.io/projected/636d3dfa-e47c-487c-a23f-4c4b8f65a69c-kube-api-access-59xnn\") pod \"frr-k8s-webhook-server-6998585d5-ph794\" (UID: 
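The frr-k8s pod above mounts four emptyDir scratch volumes, one configmap, one secret, and a projected service-account token. A sketch of the implied volume stanza using the Kubernetes Go types; the volume names come from the log, the secret behind "metrics-certs" is assumed to be frr-k8s-certs-secret based on the cache lines, and the real DaemonSet carries more fields than shown:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func frrVolumes() []v1.Volume {
	return []v1.Volume{
		{Name: "frr-sockets", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
		{Name: "frr-conf", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
		{Name: "reloader", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
		{Name: "metrics", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
		{Name: "frr-startup", VolumeSource: v1.VolumeSource{
			ConfigMap: &v1.ConfigMapVolumeSource{
				LocalObjectReference: v1.LocalObjectReference{Name: "frr-startup"},
			},
		}},
		{Name: "metrics-certs", VolumeSource: v1.VolumeSource{
			// Assumption: the secret name, taken from the reflector cache line.
			Secret: &v1.SecretVolumeSource{SecretName: "frr-k8s-certs-secret"},
		}},
	}
}

func main() {
	for _, v := range frrVolumes() {
		fmt.Println("volume:", v.Name)
	}
}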
\"636d3dfa-e47c-487c-a23f-4c4b8f65a69c\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-ph794" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.902093 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8s79\" (UniqueName: \"kubernetes.io/projected/5419f653-54d8-407c-bf43-0258a3b4451c-kube-api-access-v8s79\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.905116 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5419f653-54d8-407c-bf43-0258a3b4451c-metrics-certs\") pod \"frr-k8s-4vf85\" (UID: \"5419f653-54d8-407c-bf43-0258a3b4451c\") " pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.985555 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-memberlist\") pod \"speaker-f8zsn\" (UID: \"e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf\") " pod="metallb-system/speaker-f8zsn" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.985641 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/129ee7fb-2597-475d-882e-8064f3e4b4fc-cert\") pod \"controller-6c7b4b5f48-hpt9g\" (UID: \"129ee7fb-2597-475d-882e-8064f3e4b4fc\") " pod="metallb-system/controller-6c7b4b5f48-hpt9g" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.985684 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-metallb-excludel2\") pod \"speaker-f8zsn\" (UID: \"e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf\") " pod="metallb-system/speaker-f8zsn" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.985726 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/129ee7fb-2597-475d-882e-8064f3e4b4fc-metrics-certs\") pod \"controller-6c7b4b5f48-hpt9g\" (UID: \"129ee7fb-2597-475d-882e-8064f3e4b4fc\") " pod="metallb-system/controller-6c7b4b5f48-hpt9g" Nov 22 07:28:44 crc kubenswrapper[4929]: E1122 07:28:44.985732 4929 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.985754 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7vpr\" (UniqueName: \"kubernetes.io/projected/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-kube-api-access-g7vpr\") pod \"speaker-f8zsn\" (UID: \"e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf\") " pod="metallb-system/speaker-f8zsn" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.985782 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nt9wc\" (UniqueName: \"kubernetes.io/projected/129ee7fb-2597-475d-882e-8064f3e4b4fc-kube-api-access-nt9wc\") pod \"controller-6c7b4b5f48-hpt9g\" (UID: \"129ee7fb-2597-475d-882e-8064f3e4b4fc\") " pod="metallb-system/controller-6c7b4b5f48-hpt9g" Nov 22 07:28:44 crc kubenswrapper[4929]: E1122 07:28:44.985802 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-memberlist podName:e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf nodeName:}" failed. 
No retries permitted until 2025-11-22 07:28:45.485784439 +0000 UTC m=+1062.595238452 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-memberlist") pod "speaker-f8zsn" (UID: "e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf") : secret "metallb-memberlist" not found Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.985828 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-metrics-certs\") pod \"speaker-f8zsn\" (UID: \"e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf\") " pod="metallb-system/speaker-f8zsn" Nov 22 07:28:44 crc kubenswrapper[4929]: E1122 07:28:44.986167 4929 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Nov 22 07:28:44 crc kubenswrapper[4929]: E1122 07:28:44.986253 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/129ee7fb-2597-475d-882e-8064f3e4b4fc-metrics-certs podName:129ee7fb-2597-475d-882e-8064f3e4b4fc nodeName:}" failed. No retries permitted until 2025-11-22 07:28:45.48623433 +0000 UTC m=+1062.595688423 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/129ee7fb-2597-475d-882e-8064f3e4b4fc-metrics-certs") pod "controller-6c7b4b5f48-hpt9g" (UID: "129ee7fb-2597-475d-882e-8064f3e4b4fc") : secret "controller-certs-secret" not found Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.986541 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-metallb-excludel2\") pod \"speaker-f8zsn\" (UID: \"e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf\") " pod="metallb-system/speaker-f8zsn" Nov 22 07:28:44 crc kubenswrapper[4929]: I1122 07:28:44.990267 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-metrics-certs\") pod \"speaker-f8zsn\" (UID: \"e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf\") " pod="metallb-system/speaker-f8zsn" Nov 22 07:28:45 crc kubenswrapper[4929]: I1122 07:28:45.002418 4929 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 22 07:28:45 crc kubenswrapper[4929]: I1122 07:28:45.010278 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/129ee7fb-2597-475d-882e-8064f3e4b4fc-cert\") pod \"controller-6c7b4b5f48-hpt9g\" (UID: \"129ee7fb-2597-475d-882e-8064f3e4b4fc\") " pod="metallb-system/controller-6c7b4b5f48-hpt9g" Nov 22 07:28:45 crc kubenswrapper[4929]: I1122 07:28:45.021833 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7vpr\" (UniqueName: \"kubernetes.io/projected/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-kube-api-access-g7vpr\") pod \"speaker-f8zsn\" (UID: \"e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf\") " pod="metallb-system/speaker-f8zsn" Nov 22 07:28:45 crc kubenswrapper[4929]: I1122 07:28:45.027189 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nt9wc\" (UniqueName: \"kubernetes.io/projected/129ee7fb-2597-475d-882e-8064f3e4b4fc-kube-api-access-nt9wc\") pod \"controller-6c7b4b5f48-hpt9g\" (UID: \"129ee7fb-2597-475d-882e-8064f3e4b4fc\") " 
pod="metallb-system/controller-6c7b4b5f48-hpt9g" Nov 22 07:28:45 crc kubenswrapper[4929]: I1122 07:28:45.068394 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-4vf85" Nov 22 07:28:45 crc kubenswrapper[4929]: I1122 07:28:45.083300 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ph794" Nov 22 07:28:45 crc kubenswrapper[4929]: I1122 07:28:45.312803 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-ph794"] Nov 22 07:28:45 crc kubenswrapper[4929]: W1122 07:28:45.320477 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod636d3dfa_e47c_487c_a23f_4c4b8f65a69c.slice/crio-4d4cc8abd82aa68ca6c19a5361a023a9e9c16da9b9e0b98968b6b6ee36bc44e6 WatchSource:0}: Error finding container 4d4cc8abd82aa68ca6c19a5361a023a9e9c16da9b9e0b98968b6b6ee36bc44e6: Status 404 returned error can't find the container with id 4d4cc8abd82aa68ca6c19a5361a023a9e9c16da9b9e0b98968b6b6ee36bc44e6 Nov 22 07:28:45 crc kubenswrapper[4929]: I1122 07:28:45.495441 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-memberlist\") pod \"speaker-f8zsn\" (UID: \"e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf\") " pod="metallb-system/speaker-f8zsn" Nov 22 07:28:45 crc kubenswrapper[4929]: I1122 07:28:45.495549 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/129ee7fb-2597-475d-882e-8064f3e4b4fc-metrics-certs\") pod \"controller-6c7b4b5f48-hpt9g\" (UID: \"129ee7fb-2597-475d-882e-8064f3e4b4fc\") " pod="metallb-system/controller-6c7b4b5f48-hpt9g" Nov 22 07:28:45 crc kubenswrapper[4929]: E1122 07:28:45.495614 4929 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 22 07:28:45 crc kubenswrapper[4929]: E1122 07:28:45.495695 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-memberlist podName:e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf nodeName:}" failed. No retries permitted until 2025-11-22 07:28:46.495674515 +0000 UTC m=+1063.605128548 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-memberlist") pod "speaker-f8zsn" (UID: "e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf") : secret "metallb-memberlist" not found Nov 22 07:28:45 crc kubenswrapper[4929]: I1122 07:28:45.502052 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/129ee7fb-2597-475d-882e-8064f3e4b4fc-metrics-certs\") pod \"controller-6c7b4b5f48-hpt9g\" (UID: \"129ee7fb-2597-475d-882e-8064f3e4b4fc\") " pod="metallb-system/controller-6c7b4b5f48-hpt9g" Nov 22 07:28:45 crc kubenswrapper[4929]: I1122 07:28:45.652444 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-4vf85" event={"ID":"5419f653-54d8-407c-bf43-0258a3b4451c","Type":"ContainerStarted","Data":"6ce3b23758d8d0a86933be32e3f8081155f1b1808a814f121436a9b298da9fbc"} Nov 22 07:28:45 crc kubenswrapper[4929]: I1122 07:28:45.653750 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ph794" event={"ID":"636d3dfa-e47c-487c-a23f-4c4b8f65a69c","Type":"ContainerStarted","Data":"4d4cc8abd82aa68ca6c19a5361a023a9e9c16da9b9e0b98968b6b6ee36bc44e6"} Nov 22 07:28:45 crc kubenswrapper[4929]: I1122 07:28:45.747537 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-hpt9g" Nov 22 07:28:45 crc kubenswrapper[4929]: I1122 07:28:45.955093 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-hpt9g"] Nov 22 07:28:45 crc kubenswrapper[4929]: W1122 07:28:45.965479 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod129ee7fb_2597_475d_882e_8064f3e4b4fc.slice/crio-02cacf2cf6626366be3dfa3c21034d4377fb9585a27fd93a640cd1245caa0dfc WatchSource:0}: Error finding container 02cacf2cf6626366be3dfa3c21034d4377fb9585a27fd93a640cd1245caa0dfc: Status 404 returned error can't find the container with id 02cacf2cf6626366be3dfa3c21034d4377fb9585a27fd93a640cd1245caa0dfc Nov 22 07:28:46 crc kubenswrapper[4929]: I1122 07:28:46.506994 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-memberlist\") pod \"speaker-f8zsn\" (UID: \"e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf\") " pod="metallb-system/speaker-f8zsn" Nov 22 07:28:46 crc kubenswrapper[4929]: E1122 07:28:46.507159 4929 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 22 07:28:46 crc kubenswrapper[4929]: E1122 07:28:46.507545 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-memberlist podName:e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf nodeName:}" failed. No retries permitted until 2025-11-22 07:28:48.507522866 +0000 UTC m=+1065.616976879 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-memberlist") pod "speaker-f8zsn" (UID: "e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf") : secret "metallb-memberlist" not found Nov 22 07:28:46 crc kubenswrapper[4929]: I1122 07:28:46.691934 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-hpt9g" event={"ID":"129ee7fb-2597-475d-882e-8064f3e4b4fc","Type":"ContainerStarted","Data":"cdf36926d9ed86eab5a29bed59bb375e79370340461de60f2de34e1180cbbc29"} Nov 22 07:28:46 crc kubenswrapper[4929]: I1122 07:28:46.691981 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-hpt9g" event={"ID":"129ee7fb-2597-475d-882e-8064f3e4b4fc","Type":"ContainerStarted","Data":"02cacf2cf6626366be3dfa3c21034d4377fb9585a27fd93a640cd1245caa0dfc"} Nov 22 07:28:47 crc kubenswrapper[4929]: I1122 07:28:47.713333 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-hpt9g" event={"ID":"129ee7fb-2597-475d-882e-8064f3e4b4fc","Type":"ContainerStarted","Data":"973095675e74f1c17e1145adbceed73d95b3e52a7115c69145d714cdbbdcaebd"} Nov 22 07:28:47 crc kubenswrapper[4929]: I1122 07:28:47.713790 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-hpt9g" Nov 22 07:28:47 crc kubenswrapper[4929]: I1122 07:28:47.732802 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-hpt9g" podStartSLOduration=3.732785162 podStartE2EDuration="3.732785162s" podCreationTimestamp="2025-11-22 07:28:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:28:47.73150741 +0000 UTC m=+1064.840961443" watchObservedRunningTime="2025-11-22 07:28:47.732785162 +0000 UTC m=+1064.842239175" Nov 22 07:28:48 crc kubenswrapper[4929]: I1122 07:28:48.531391 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-memberlist\") pod \"speaker-f8zsn\" (UID: \"e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf\") " pod="metallb-system/speaker-f8zsn" Nov 22 07:28:48 crc kubenswrapper[4929]: I1122 07:28:48.539842 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf-memberlist\") pod \"speaker-f8zsn\" (UID: \"e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf\") " pod="metallb-system/speaker-f8zsn" Nov 22 07:28:48 crc kubenswrapper[4929]: I1122 07:28:48.594616 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:28:48 crc kubenswrapper[4929]: I1122 07:28:48.594677 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:28:48 crc kubenswrapper[4929]: I1122 07:28:48.737619 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-f8zsn" Nov 22 07:28:48 crc kubenswrapper[4929]: W1122 07:28:48.769895 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode2d7c1fd_e426_4a70_9c6f_3aff0bb0e1bf.slice/crio-3bb8eec7056be40a82824a36c08311816f0a14010857f38f5d1e27cb53694eb5 WatchSource:0}: Error finding container 3bb8eec7056be40a82824a36c08311816f0a14010857f38f5d1e27cb53694eb5: Status 404 returned error can't find the container with id 3bb8eec7056be40a82824a36c08311816f0a14010857f38f5d1e27cb53694eb5 Nov 22 07:28:49 crc kubenswrapper[4929]: I1122 07:28:49.728139 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-f8zsn" event={"ID":"e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf","Type":"ContainerStarted","Data":"e85e743ec7c3afe01af48628e66d499ecff1bbdb537e7e4d7aee802c9026a736"} Nov 22 07:28:49 crc kubenswrapper[4929]: I1122 07:28:49.728480 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-f8zsn" event={"ID":"e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf","Type":"ContainerStarted","Data":"008f7c422042aa2a0693c5e7cd105a8fdc9a79362beabf13fd039fbec34fa062"} Nov 22 07:28:49 crc kubenswrapper[4929]: I1122 07:28:49.728495 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-f8zsn" event={"ID":"e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf","Type":"ContainerStarted","Data":"3bb8eec7056be40a82824a36c08311816f0a14010857f38f5d1e27cb53694eb5"} Nov 22 07:28:49 crc kubenswrapper[4929]: I1122 07:28:49.728634 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-f8zsn" Nov 22 07:28:49 crc kubenswrapper[4929]: I1122 07:28:49.745811 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-f8zsn" podStartSLOduration=5.745793166 podStartE2EDuration="5.745793166s" podCreationTimestamp="2025-11-22 07:28:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:28:49.740989015 +0000 UTC m=+1066.850443048" watchObservedRunningTime="2025-11-22 07:28:49.745793166 +0000 UTC m=+1066.855247169" Nov 22 07:28:53 crc kubenswrapper[4929]: I1122 07:28:53.750793 4929 generic.go:334] "Generic (PLEG): container finished" podID="5419f653-54d8-407c-bf43-0258a3b4451c" containerID="ae2231a795b9ce6ef14e04e66986025bd3f1030511012c59076356b402502e10" exitCode=0 Nov 22 07:28:53 crc kubenswrapper[4929]: I1122 07:28:53.750851 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-4vf85" event={"ID":"5419f653-54d8-407c-bf43-0258a3b4451c","Type":"ContainerDied","Data":"ae2231a795b9ce6ef14e04e66986025bd3f1030511012c59076356b402502e10"} Nov 22 07:28:53 crc kubenswrapper[4929]: I1122 07:28:53.752428 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ph794" event={"ID":"636d3dfa-e47c-487c-a23f-4c4b8f65a69c","Type":"ContainerStarted","Data":"a2ce9ca4e7a5e3302305665ffe96f25a50020d6129118b714054782eb44126f9"} Nov 22 07:28:53 crc kubenswrapper[4929]: I1122 07:28:53.752634 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ph794" Nov 22 07:28:53 crc kubenswrapper[4929]: I1122 07:28:53.807861 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ph794" podStartSLOduration=1.71624849 
podStartE2EDuration="9.807837551s" podCreationTimestamp="2025-11-22 07:28:44 +0000 UTC" firstStartedPulling="2025-11-22 07:28:45.322883845 +0000 UTC m=+1062.432337858" lastFinishedPulling="2025-11-22 07:28:53.414472916 +0000 UTC m=+1070.523926919" observedRunningTime="2025-11-22 07:28:53.802621709 +0000 UTC m=+1070.912075722" watchObservedRunningTime="2025-11-22 07:28:53.807837551 +0000 UTC m=+1070.917291574" Nov 22 07:28:54 crc kubenswrapper[4929]: I1122 07:28:54.774101 4929 generic.go:334] "Generic (PLEG): container finished" podID="5419f653-54d8-407c-bf43-0258a3b4451c" containerID="515ee7e3875c18d58222dd3a1845d6ef32b2d5a8b0debc0f52cfd8daf3555d08" exitCode=0 Nov 22 07:28:54 crc kubenswrapper[4929]: I1122 07:28:54.774286 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-4vf85" event={"ID":"5419f653-54d8-407c-bf43-0258a3b4451c","Type":"ContainerDied","Data":"515ee7e3875c18d58222dd3a1845d6ef32b2d5a8b0debc0f52cfd8daf3555d08"} Nov 22 07:28:55 crc kubenswrapper[4929]: I1122 07:28:55.788344 4929 generic.go:334] "Generic (PLEG): container finished" podID="5419f653-54d8-407c-bf43-0258a3b4451c" containerID="a085cb9e3c0fd4973975b51d0067710237b9ab66511e7646303d2041577ded53" exitCode=0 Nov 22 07:28:55 crc kubenswrapper[4929]: I1122 07:28:55.788680 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-4vf85" event={"ID":"5419f653-54d8-407c-bf43-0258a3b4451c","Type":"ContainerDied","Data":"a085cb9e3c0fd4973975b51d0067710237b9ab66511e7646303d2041577ded53"} Nov 22 07:28:56 crc kubenswrapper[4929]: I1122 07:28:56.800711 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-4vf85" event={"ID":"5419f653-54d8-407c-bf43-0258a3b4451c","Type":"ContainerStarted","Data":"62e20a576e49f14cd14e2daceec5ca95338db0a892053adc602285b6fd45c8e8"} Nov 22 07:28:58 crc kubenswrapper[4929]: I1122 07:28:58.823038 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-4vf85" event={"ID":"5419f653-54d8-407c-bf43-0258a3b4451c","Type":"ContainerStarted","Data":"f839c34891f775e9e9689b50eaa353c59a6f455cced152f34b058ccc26f291d7"} Nov 22 07:28:58 crc kubenswrapper[4929]: I1122 07:28:58.823371 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-4vf85" event={"ID":"5419f653-54d8-407c-bf43-0258a3b4451c","Type":"ContainerStarted","Data":"18309783fba857b43f7fb49068c864e8db918ad0bfb6cef880809d24f355258c"} Nov 22 07:28:59 crc kubenswrapper[4929]: I1122 07:28:59.841959 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-4vf85" event={"ID":"5419f653-54d8-407c-bf43-0258a3b4451c","Type":"ContainerStarted","Data":"7bb8d071716c47167fb95d8fdbf4ec499d6be0966dc4d531a68942806d9bc2ac"} Nov 22 07:29:00 crc kubenswrapper[4929]: I1122 07:29:00.852821 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-4vf85" event={"ID":"5419f653-54d8-407c-bf43-0258a3b4451c","Type":"ContainerStarted","Data":"cd7bf626fa82bd5383d03a6a30c0b742fbe0e3147778d13a6811bb56e05d2764"} Nov 22 07:29:02 crc kubenswrapper[4929]: I1122 07:29:02.876126 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-4vf85" event={"ID":"5419f653-54d8-407c-bf43-0258a3b4451c","Type":"ContainerStarted","Data":"80b58b4bf00a2329bc894fcbdb799b37a5b33630afe51e49b068cf63ddc0661b"} Nov 22 07:29:02 crc kubenswrapper[4929]: I1122 07:29:02.876651 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-4vf85" Nov 22 07:29:02 crc 
Nov 22 07:29:02 crc kubenswrapper[4929]: I1122 07:29:02.903606 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-4vf85" podStartSLOduration=10.778370611 podStartE2EDuration="18.903589691s" podCreationTimestamp="2025-11-22 07:28:44 +0000 UTC" firstStartedPulling="2025-11-22 07:28:45.237926031 +0000 UTC m=+1062.347380034" lastFinishedPulling="2025-11-22 07:28:53.363145101 +0000 UTC m=+1070.472599114" observedRunningTime="2025-11-22 07:29:02.898738128 +0000 UTC m=+1080.008192151" watchObservedRunningTime="2025-11-22 07:29:02.903589691 +0000 UTC m=+1080.013043704"
Nov 22 07:29:05 crc kubenswrapper[4929]: I1122 07:29:05.069527 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-4vf85"
Nov 22 07:29:05 crc kubenswrapper[4929]: I1122 07:29:05.090129 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ph794"
Nov 22 07:29:05 crc kubenswrapper[4929]: I1122 07:29:05.178460 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-4vf85"
Nov 22 07:29:05 crc kubenswrapper[4929]: I1122 07:29:05.752718 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-hpt9g"
Nov 22 07:29:08 crc kubenswrapper[4929]: I1122 07:29:08.742530 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-f8zsn"
Nov 22 07:29:11 crc kubenswrapper[4929]: I1122 07:29:11.908195 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-hfw9c"]
Nov 22 07:29:11 crc kubenswrapper[4929]: I1122 07:29:11.909836 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-hfw9c"
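The frr-k8s pod flips from probe="startup" status="unhealthy" to status="started" before its readiness ever reports ready (at 07:29:15): a startup probe suppresses the liveness and readiness probes until it passes. A sketch of what such a probe pair looks like in the Go API types; the ports, paths, and thresholds here are hypothetical, as the log shows only the status transitions:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	c := v1.Container{
		Name: "frr",
		StartupProbe: &v1.Probe{ // gates liveness/readiness until it succeeds
			ProbeHandler:     v1.ProbeHandler{HTTPGet: &v1.HTTPGetAction{Path: "/livez", Port: intstr.FromInt(7573)}},
			FailureThreshold: 30,
			PeriodSeconds:    5,
		},
		ReadinessProbe: &v1.Probe{
			ProbeHandler:  v1.ProbeHandler{HTTPGet: &v1.HTTPGetAction{Path: "/readyz", Port: intstr.FromInt(7573)}},
			PeriodSeconds: 10,
		},
	}
	fmt.Println("startup failureThreshold:", c.StartupProbe.FailureThreshold,
		"readiness period:", c.ReadinessProbe.PeriodSeconds)
}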
Need to start a new one" pod="openstack-operators/openstack-operator-index-hfw9c" Nov 22 07:29:11 crc kubenswrapper[4929]: I1122 07:29:11.913056 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-wfc2l" Nov 22 07:29:11 crc kubenswrapper[4929]: I1122 07:29:11.914739 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 22 07:29:11 crc kubenswrapper[4929]: I1122 07:29:11.920722 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 22 07:29:11 crc kubenswrapper[4929]: I1122 07:29:11.933855 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-hfw9c"] Nov 22 07:29:11 crc kubenswrapper[4929]: I1122 07:29:11.986462 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cvtj\" (UniqueName: \"kubernetes.io/projected/308f3b2c-99ff-4dc1-bbfe-80ccc0b13551-kube-api-access-6cvtj\") pod \"openstack-operator-index-hfw9c\" (UID: \"308f3b2c-99ff-4dc1-bbfe-80ccc0b13551\") " pod="openstack-operators/openstack-operator-index-hfw9c" Nov 22 07:29:12 crc kubenswrapper[4929]: I1122 07:29:12.088144 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cvtj\" (UniqueName: \"kubernetes.io/projected/308f3b2c-99ff-4dc1-bbfe-80ccc0b13551-kube-api-access-6cvtj\") pod \"openstack-operator-index-hfw9c\" (UID: \"308f3b2c-99ff-4dc1-bbfe-80ccc0b13551\") " pod="openstack-operators/openstack-operator-index-hfw9c" Nov 22 07:29:12 crc kubenswrapper[4929]: I1122 07:29:12.110169 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6cvtj\" (UniqueName: \"kubernetes.io/projected/308f3b2c-99ff-4dc1-bbfe-80ccc0b13551-kube-api-access-6cvtj\") pod \"openstack-operator-index-hfw9c\" (UID: \"308f3b2c-99ff-4dc1-bbfe-80ccc0b13551\") " pod="openstack-operators/openstack-operator-index-hfw9c" Nov 22 07:29:12 crc kubenswrapper[4929]: I1122 07:29:12.233727 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-hfw9c" Nov 22 07:29:12 crc kubenswrapper[4929]: I1122 07:29:12.650931 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-hfw9c"] Nov 22 07:29:12 crc kubenswrapper[4929]: W1122 07:29:12.659096 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod308f3b2c_99ff_4dc1_bbfe_80ccc0b13551.slice/crio-9d8e3ecc5723080b00363ed0eab53a56c60825ff3246e58bf229d4440c621db4 WatchSource:0}: Error finding container 9d8e3ecc5723080b00363ed0eab53a56c60825ff3246e58bf229d4440c621db4: Status 404 returned error can't find the container with id 9d8e3ecc5723080b00363ed0eab53a56c60825ff3246e58bf229d4440c621db4 Nov 22 07:29:12 crc kubenswrapper[4929]: I1122 07:29:12.662461 4929 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 07:29:12 crc kubenswrapper[4929]: I1122 07:29:12.956173 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-hfw9c" event={"ID":"308f3b2c-99ff-4dc1-bbfe-80ccc0b13551","Type":"ContainerStarted","Data":"9d8e3ecc5723080b00363ed0eab53a56c60825ff3246e58bf229d4440c621db4"} Nov 22 07:29:15 crc kubenswrapper[4929]: I1122 07:29:15.073078 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-4vf85" Nov 22 07:29:15 crc kubenswrapper[4929]: I1122 07:29:15.275104 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-hfw9c"] Nov 22 07:29:15 crc kubenswrapper[4929]: I1122 07:29:15.892185 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-pfl57"] Nov 22 07:29:15 crc kubenswrapper[4929]: I1122 07:29:15.894084 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-pfl57" Nov 22 07:29:15 crc kubenswrapper[4929]: I1122 07:29:15.897718 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-pfl57"] Nov 22 07:29:15 crc kubenswrapper[4929]: I1122 07:29:15.947413 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ml7p\" (UniqueName: \"kubernetes.io/projected/f740d689-ada3-413f-a904-e09e08e09a1b-kube-api-access-2ml7p\") pod \"openstack-operator-index-pfl57\" (UID: \"f740d689-ada3-413f-a904-e09e08e09a1b\") " pod="openstack-operators/openstack-operator-index-pfl57" Nov 22 07:29:16 crc kubenswrapper[4929]: I1122 07:29:16.048985 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ml7p\" (UniqueName: \"kubernetes.io/projected/f740d689-ada3-413f-a904-e09e08e09a1b-kube-api-access-2ml7p\") pod \"openstack-operator-index-pfl57\" (UID: \"f740d689-ada3-413f-a904-e09e08e09a1b\") " pod="openstack-operators/openstack-operator-index-pfl57" Nov 22 07:29:16 crc kubenswrapper[4929]: I1122 07:29:16.075950 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ml7p\" (UniqueName: \"kubernetes.io/projected/f740d689-ada3-413f-a904-e09e08e09a1b-kube-api-access-2ml7p\") pod \"openstack-operator-index-pfl57\" (UID: \"f740d689-ada3-413f-a904-e09e08e09a1b\") " pod="openstack-operators/openstack-operator-index-pfl57" Nov 22 07:29:16 crc kubenswrapper[4929]: I1122 07:29:16.227453 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-pfl57" Nov 22 07:29:17 crc kubenswrapper[4929]: I1122 07:29:17.751480 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-pfl57"] Nov 22 07:29:17 crc kubenswrapper[4929]: W1122 07:29:17.763427 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf740d689_ada3_413f_a904_e09e08e09a1b.slice/crio-259287f78f1a214afdfb7118d3ff61d54053f69b4ab4abe3494576ad2915e234 WatchSource:0}: Error finding container 259287f78f1a214afdfb7118d3ff61d54053f69b4ab4abe3494576ad2915e234: Status 404 returned error can't find the container with id 259287f78f1a214afdfb7118d3ff61d54053f69b4ab4abe3494576ad2915e234 Nov 22 07:29:17 crc kubenswrapper[4929]: I1122 07:29:17.991378 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-hfw9c" event={"ID":"308f3b2c-99ff-4dc1-bbfe-80ccc0b13551","Type":"ContainerStarted","Data":"401538d1c0ba25bc986d7f65fedc312e358b9cd3d0d3f3bfc353d9f92c370ec7"} Nov 22 07:29:17 crc kubenswrapper[4929]: I1122 07:29:17.991493 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-hfw9c" podUID="308f3b2c-99ff-4dc1-bbfe-80ccc0b13551" containerName="registry-server" containerID="cri-o://401538d1c0ba25bc986d7f65fedc312e358b9cd3d0d3f3bfc353d9f92c370ec7" gracePeriod=2 Nov 22 07:29:17 crc kubenswrapper[4929]: I1122 07:29:17.993537 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-pfl57" event={"ID":"f740d689-ada3-413f-a904-e09e08e09a1b","Type":"ContainerStarted","Data":"259287f78f1a214afdfb7118d3ff61d54053f69b4ab4abe3494576ad2915e234"} Nov 22 07:29:18 crc kubenswrapper[4929]: I1122 07:29:18.018534 4929 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-hfw9c" podStartSLOduration=2.075603952 podStartE2EDuration="7.018508964s" podCreationTimestamp="2025-11-22 07:29:11 +0000 UTC" firstStartedPulling="2025-11-22 07:29:12.662057657 +0000 UTC m=+1089.771511680" lastFinishedPulling="2025-11-22 07:29:17.604962639 +0000 UTC m=+1094.714416692" observedRunningTime="2025-11-22 07:29:18.012970004 +0000 UTC m=+1095.122424027" watchObservedRunningTime="2025-11-22 07:29:18.018508964 +0000 UTC m=+1095.127962987" Nov 22 07:29:18 crc kubenswrapper[4929]: I1122 07:29:18.367044 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-hfw9c" Nov 22 07:29:18 crc kubenswrapper[4929]: I1122 07:29:18.478549 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6cvtj\" (UniqueName: \"kubernetes.io/projected/308f3b2c-99ff-4dc1-bbfe-80ccc0b13551-kube-api-access-6cvtj\") pod \"308f3b2c-99ff-4dc1-bbfe-80ccc0b13551\" (UID: \"308f3b2c-99ff-4dc1-bbfe-80ccc0b13551\") " Nov 22 07:29:18 crc kubenswrapper[4929]: I1122 07:29:18.484308 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308f3b2c-99ff-4dc1-bbfe-80ccc0b13551-kube-api-access-6cvtj" (OuterVolumeSpecName: "kube-api-access-6cvtj") pod "308f3b2c-99ff-4dc1-bbfe-80ccc0b13551" (UID: "308f3b2c-99ff-4dc1-bbfe-80ccc0b13551"). InnerVolumeSpecName "kube-api-access-6cvtj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:29:18 crc kubenswrapper[4929]: I1122 07:29:18.580286 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6cvtj\" (UniqueName: \"kubernetes.io/projected/308f3b2c-99ff-4dc1-bbfe-80ccc0b13551-kube-api-access-6cvtj\") on node \"crc\" DevicePath \"\"" Nov 22 07:29:18 crc kubenswrapper[4929]: I1122 07:29:18.595043 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:29:18 crc kubenswrapper[4929]: I1122 07:29:18.595119 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:29:18 crc kubenswrapper[4929]: I1122 07:29:18.595240 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 07:29:18 crc kubenswrapper[4929]: I1122 07:29:18.596205 4929 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"feb5786acfc814aec51c20213fe9cfe83614e95b20f84e104dde0c09f6c7dd83"} pod="openshift-machine-config-operator/machine-config-daemon-dssfx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 07:29:18 crc kubenswrapper[4929]: I1122 07:29:18.596359 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" 
containerName="machine-config-daemon" containerID="cri-o://feb5786acfc814aec51c20213fe9cfe83614e95b20f84e104dde0c09f6c7dd83" gracePeriod=600 Nov 22 07:29:19 crc kubenswrapper[4929]: I1122 07:29:19.003844 4929 generic.go:334] "Generic (PLEG): container finished" podID="470531cb-120c-48d9-80e1-adf074cf3055" containerID="feb5786acfc814aec51c20213fe9cfe83614e95b20f84e104dde0c09f6c7dd83" exitCode=0 Nov 22 07:29:19 crc kubenswrapper[4929]: I1122 07:29:19.003925 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerDied","Data":"feb5786acfc814aec51c20213fe9cfe83614e95b20f84e104dde0c09f6c7dd83"} Nov 22 07:29:19 crc kubenswrapper[4929]: I1122 07:29:19.004345 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"a1361e4489b9f245e60757542f43a3174963376402a0adf9696f3bcc58d868f1"} Nov 22 07:29:19 crc kubenswrapper[4929]: I1122 07:29:19.004386 4929 scope.go:117] "RemoveContainer" containerID="80896cec93a4f79a4cc130dc8d62e6989c7e25a8b824a1964efc7835e41ed527" Nov 22 07:29:19 crc kubenswrapper[4929]: I1122 07:29:19.008796 4929 generic.go:334] "Generic (PLEG): container finished" podID="308f3b2c-99ff-4dc1-bbfe-80ccc0b13551" containerID="401538d1c0ba25bc986d7f65fedc312e358b9cd3d0d3f3bfc353d9f92c370ec7" exitCode=0 Nov 22 07:29:19 crc kubenswrapper[4929]: I1122 07:29:19.008855 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-hfw9c" Nov 22 07:29:19 crc kubenswrapper[4929]: I1122 07:29:19.008879 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-hfw9c" event={"ID":"308f3b2c-99ff-4dc1-bbfe-80ccc0b13551","Type":"ContainerDied","Data":"401538d1c0ba25bc986d7f65fedc312e358b9cd3d0d3f3bfc353d9f92c370ec7"} Nov 22 07:29:19 crc kubenswrapper[4929]: I1122 07:29:19.008979 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-hfw9c" event={"ID":"308f3b2c-99ff-4dc1-bbfe-80ccc0b13551","Type":"ContainerDied","Data":"9d8e3ecc5723080b00363ed0eab53a56c60825ff3246e58bf229d4440c621db4"} Nov 22 07:29:19 crc kubenswrapper[4929]: I1122 07:29:19.011329 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-pfl57" event={"ID":"f740d689-ada3-413f-a904-e09e08e09a1b","Type":"ContainerStarted","Data":"22f0a34148eb6e2c45121a0bb10c0c43683458042bdd3ab90322ea9b72994501"} Nov 22 07:29:19 crc kubenswrapper[4929]: I1122 07:29:19.038525 4929 scope.go:117] "RemoveContainer" containerID="401538d1c0ba25bc986d7f65fedc312e358b9cd3d0d3f3bfc353d9f92c370ec7" Nov 22 07:29:19 crc kubenswrapper[4929]: I1122 07:29:19.053427 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-pfl57" podStartSLOduration=3.846158196 podStartE2EDuration="4.053408086s" podCreationTimestamp="2025-11-22 07:29:15 +0000 UTC" firstStartedPulling="2025-11-22 07:29:17.767303975 +0000 UTC m=+1094.876757978" lastFinishedPulling="2025-11-22 07:29:17.974553855 +0000 UTC m=+1095.084007868" observedRunningTime="2025-11-22 07:29:19.045529268 +0000 UTC m=+1096.154983291" watchObservedRunningTime="2025-11-22 07:29:19.053408086 +0000 UTC m=+1096.162862099" Nov 22 07:29:19 crc kubenswrapper[4929]: I1122 
07:29:19.061726 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-hfw9c"] Nov 22 07:29:19 crc kubenswrapper[4929]: I1122 07:29:19.066111 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-hfw9c"] Nov 22 07:29:19 crc kubenswrapper[4929]: I1122 07:29:19.069536 4929 scope.go:117] "RemoveContainer" containerID="401538d1c0ba25bc986d7f65fedc312e358b9cd3d0d3f3bfc353d9f92c370ec7" Nov 22 07:29:19 crc kubenswrapper[4929]: E1122 07:29:19.069960 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"401538d1c0ba25bc986d7f65fedc312e358b9cd3d0d3f3bfc353d9f92c370ec7\": container with ID starting with 401538d1c0ba25bc986d7f65fedc312e358b9cd3d0d3f3bfc353d9f92c370ec7 not found: ID does not exist" containerID="401538d1c0ba25bc986d7f65fedc312e358b9cd3d0d3f3bfc353d9f92c370ec7" Nov 22 07:29:19 crc kubenswrapper[4929]: I1122 07:29:19.070001 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"401538d1c0ba25bc986d7f65fedc312e358b9cd3d0d3f3bfc353d9f92c370ec7"} err="failed to get container status \"401538d1c0ba25bc986d7f65fedc312e358b9cd3d0d3f3bfc353d9f92c370ec7\": rpc error: code = NotFound desc = could not find container \"401538d1c0ba25bc986d7f65fedc312e358b9cd3d0d3f3bfc353d9f92c370ec7\": container with ID starting with 401538d1c0ba25bc986d7f65fedc312e358b9cd3d0d3f3bfc353d9f92c370ec7 not found: ID does not exist" Nov 22 07:29:19 crc kubenswrapper[4929]: I1122 07:29:19.957804 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308f3b2c-99ff-4dc1-bbfe-80ccc0b13551" path="/var/lib/kubelet/pods/308f3b2c-99ff-4dc1-bbfe-80ccc0b13551/volumes" Nov 22 07:29:26 crc kubenswrapper[4929]: I1122 07:29:26.228034 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-pfl57" Nov 22 07:29:26 crc kubenswrapper[4929]: I1122 07:29:26.228640 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-pfl57" Nov 22 07:29:26 crc kubenswrapper[4929]: I1122 07:29:26.256833 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-pfl57" Nov 22 07:29:27 crc kubenswrapper[4929]: I1122 07:29:27.100562 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-pfl57" Nov 22 07:29:34 crc kubenswrapper[4929]: I1122 07:29:34.552380 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg"] Nov 22 07:29:34 crc kubenswrapper[4929]: E1122 07:29:34.553889 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="308f3b2c-99ff-4dc1-bbfe-80ccc0b13551" containerName="registry-server" Nov 22 07:29:34 crc kubenswrapper[4929]: I1122 07:29:34.553910 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="308f3b2c-99ff-4dc1-bbfe-80ccc0b13551" containerName="registry-server" Nov 22 07:29:34 crc kubenswrapper[4929]: I1122 07:29:34.554078 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="308f3b2c-99ff-4dc1-bbfe-80ccc0b13551" containerName="registry-server" Nov 22 07:29:34 crc kubenswrapper[4929]: I1122 07:29:34.555184 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg" Nov 22 07:29:34 crc kubenswrapper[4929]: I1122 07:29:34.558716 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-9pwhv" Nov 22 07:29:34 crc kubenswrapper[4929]: I1122 07:29:34.571459 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg"] Nov 22 07:29:34 crc kubenswrapper[4929]: I1122 07:29:34.643133 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6css\" (UniqueName: \"kubernetes.io/projected/a5922246-83ed-429e-82eb-f9fd7810d687-kube-api-access-v6css\") pod \"95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg\" (UID: \"a5922246-83ed-429e-82eb-f9fd7810d687\") " pod="openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg" Nov 22 07:29:34 crc kubenswrapper[4929]: I1122 07:29:34.643269 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a5922246-83ed-429e-82eb-f9fd7810d687-util\") pod \"95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg\" (UID: \"a5922246-83ed-429e-82eb-f9fd7810d687\") " pod="openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg" Nov 22 07:29:34 crc kubenswrapper[4929]: I1122 07:29:34.643303 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a5922246-83ed-429e-82eb-f9fd7810d687-bundle\") pod \"95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg\" (UID: \"a5922246-83ed-429e-82eb-f9fd7810d687\") " pod="openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg" Nov 22 07:29:34 crc kubenswrapper[4929]: I1122 07:29:34.744146 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6css\" (UniqueName: \"kubernetes.io/projected/a5922246-83ed-429e-82eb-f9fd7810d687-kube-api-access-v6css\") pod \"95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg\" (UID: \"a5922246-83ed-429e-82eb-f9fd7810d687\") " pod="openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg" Nov 22 07:29:34 crc kubenswrapper[4929]: I1122 07:29:34.744263 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a5922246-83ed-429e-82eb-f9fd7810d687-util\") pod \"95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg\" (UID: \"a5922246-83ed-429e-82eb-f9fd7810d687\") " pod="openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg" Nov 22 07:29:34 crc kubenswrapper[4929]: I1122 07:29:34.744283 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a5922246-83ed-429e-82eb-f9fd7810d687-bundle\") pod \"95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg\" (UID: \"a5922246-83ed-429e-82eb-f9fd7810d687\") " pod="openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg" Nov 22 07:29:34 crc kubenswrapper[4929]: I1122 07:29:34.744731 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/a5922246-83ed-429e-82eb-f9fd7810d687-bundle\") pod \"95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg\" (UID: \"a5922246-83ed-429e-82eb-f9fd7810d687\") " pod="openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg" Nov 22 07:29:34 crc kubenswrapper[4929]: I1122 07:29:34.744790 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a5922246-83ed-429e-82eb-f9fd7810d687-util\") pod \"95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg\" (UID: \"a5922246-83ed-429e-82eb-f9fd7810d687\") " pod="openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg" Nov 22 07:29:34 crc kubenswrapper[4929]: I1122 07:29:34.767651 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6css\" (UniqueName: \"kubernetes.io/projected/a5922246-83ed-429e-82eb-f9fd7810d687-kube-api-access-v6css\") pod \"95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg\" (UID: \"a5922246-83ed-429e-82eb-f9fd7810d687\") " pod="openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg" Nov 22 07:29:34 crc kubenswrapper[4929]: I1122 07:29:34.878266 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg" Nov 22 07:29:35 crc kubenswrapper[4929]: I1122 07:29:35.276545 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg"] Nov 22 07:29:36 crc kubenswrapper[4929]: I1122 07:29:36.136532 4929 generic.go:334] "Generic (PLEG): container finished" podID="a5922246-83ed-429e-82eb-f9fd7810d687" containerID="ae42b68da4f3d9aeae0af167e33de5af9673e816f5e66914b83bd164b51871eb" exitCode=0 Nov 22 07:29:36 crc kubenswrapper[4929]: I1122 07:29:36.136591 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg" event={"ID":"a5922246-83ed-429e-82eb-f9fd7810d687","Type":"ContainerDied","Data":"ae42b68da4f3d9aeae0af167e33de5af9673e816f5e66914b83bd164b51871eb"} Nov 22 07:29:36 crc kubenswrapper[4929]: I1122 07:29:36.136906 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg" event={"ID":"a5922246-83ed-429e-82eb-f9fd7810d687","Type":"ContainerStarted","Data":"202c51b27ee7c851dacd0f588e158000fbca83737cae53a93a30504f4d3f6d3e"} Nov 22 07:29:38 crc kubenswrapper[4929]: I1122 07:29:38.150852 4929 generic.go:334] "Generic (PLEG): container finished" podID="a5922246-83ed-429e-82eb-f9fd7810d687" containerID="58e3facd5bce58a93e210cebce7adcab53d845846255bdf53eb190ced6e4fe05" exitCode=0 Nov 22 07:29:38 crc kubenswrapper[4929]: I1122 07:29:38.150943 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg" event={"ID":"a5922246-83ed-429e-82eb-f9fd7810d687","Type":"ContainerDied","Data":"58e3facd5bce58a93e210cebce7adcab53d845846255bdf53eb190ced6e4fe05"} Nov 22 07:29:39 crc kubenswrapper[4929]: I1122 07:29:39.158634 4929 generic.go:334] "Generic (PLEG): container finished" podID="a5922246-83ed-429e-82eb-f9fd7810d687" containerID="6aba3ef56f221545581b8d83daf580d80c0839e1acb18053e52b1b2f833c8da2" exitCode=0 Nov 22 07:29:39 crc kubenswrapper[4929]: I1122 07:29:39.158711 4929 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg" event={"ID":"a5922246-83ed-429e-82eb-f9fd7810d687","Type":"ContainerDied","Data":"6aba3ef56f221545581b8d83daf580d80c0839e1acb18053e52b1b2f833c8da2"} Nov 22 07:29:40 crc kubenswrapper[4929]: I1122 07:29:40.473065 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg" Nov 22 07:29:40 crc kubenswrapper[4929]: I1122 07:29:40.570234 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a5922246-83ed-429e-82eb-f9fd7810d687-util\") pod \"a5922246-83ed-429e-82eb-f9fd7810d687\" (UID: \"a5922246-83ed-429e-82eb-f9fd7810d687\") " Nov 22 07:29:40 crc kubenswrapper[4929]: I1122 07:29:40.570326 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v6css\" (UniqueName: \"kubernetes.io/projected/a5922246-83ed-429e-82eb-f9fd7810d687-kube-api-access-v6css\") pod \"a5922246-83ed-429e-82eb-f9fd7810d687\" (UID: \"a5922246-83ed-429e-82eb-f9fd7810d687\") " Nov 22 07:29:40 crc kubenswrapper[4929]: I1122 07:29:40.570433 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a5922246-83ed-429e-82eb-f9fd7810d687-bundle\") pod \"a5922246-83ed-429e-82eb-f9fd7810d687\" (UID: \"a5922246-83ed-429e-82eb-f9fd7810d687\") " Nov 22 07:29:40 crc kubenswrapper[4929]: I1122 07:29:40.571565 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5922246-83ed-429e-82eb-f9fd7810d687-bundle" (OuterVolumeSpecName: "bundle") pod "a5922246-83ed-429e-82eb-f9fd7810d687" (UID: "a5922246-83ed-429e-82eb-f9fd7810d687"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:29:40 crc kubenswrapper[4929]: I1122 07:29:40.577183 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5922246-83ed-429e-82eb-f9fd7810d687-kube-api-access-v6css" (OuterVolumeSpecName: "kube-api-access-v6css") pod "a5922246-83ed-429e-82eb-f9fd7810d687" (UID: "a5922246-83ed-429e-82eb-f9fd7810d687"). InnerVolumeSpecName "kube-api-access-v6css". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:29:40 crc kubenswrapper[4929]: I1122 07:29:40.583895 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5922246-83ed-429e-82eb-f9fd7810d687-util" (OuterVolumeSpecName: "util") pod "a5922246-83ed-429e-82eb-f9fd7810d687" (UID: "a5922246-83ed-429e-82eb-f9fd7810d687"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:29:40 crc kubenswrapper[4929]: I1122 07:29:40.672067 4929 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a5922246-83ed-429e-82eb-f9fd7810d687-util\") on node \"crc\" DevicePath \"\"" Nov 22 07:29:40 crc kubenswrapper[4929]: I1122 07:29:40.672105 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v6css\" (UniqueName: \"kubernetes.io/projected/a5922246-83ed-429e-82eb-f9fd7810d687-kube-api-access-v6css\") on node \"crc\" DevicePath \"\"" Nov 22 07:29:40 crc kubenswrapper[4929]: I1122 07:29:40.672121 4929 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a5922246-83ed-429e-82eb-f9fd7810d687-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:29:41 crc kubenswrapper[4929]: I1122 07:29:41.175900 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg" event={"ID":"a5922246-83ed-429e-82eb-f9fd7810d687","Type":"ContainerDied","Data":"202c51b27ee7c851dacd0f588e158000fbca83737cae53a93a30504f4d3f6d3e"} Nov 22 07:29:41 crc kubenswrapper[4929]: I1122 07:29:41.175947 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="202c51b27ee7c851dacd0f588e158000fbca83737cae53a93a30504f4d3f6d3e" Nov 22 07:29:41 crc kubenswrapper[4929]: I1122 07:29:41.176019 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg" Nov 22 07:29:47 crc kubenswrapper[4929]: I1122 07:29:47.930825 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-64f4bfcd74-twkf7"] Nov 22 07:29:47 crc kubenswrapper[4929]: E1122 07:29:47.931751 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5922246-83ed-429e-82eb-f9fd7810d687" containerName="extract" Nov 22 07:29:47 crc kubenswrapper[4929]: I1122 07:29:47.931769 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5922246-83ed-429e-82eb-f9fd7810d687" containerName="extract" Nov 22 07:29:47 crc kubenswrapper[4929]: E1122 07:29:47.931786 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5922246-83ed-429e-82eb-f9fd7810d687" containerName="pull" Nov 22 07:29:47 crc kubenswrapper[4929]: I1122 07:29:47.931794 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5922246-83ed-429e-82eb-f9fd7810d687" containerName="pull" Nov 22 07:29:47 crc kubenswrapper[4929]: E1122 07:29:47.931812 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5922246-83ed-429e-82eb-f9fd7810d687" containerName="util" Nov 22 07:29:47 crc kubenswrapper[4929]: I1122 07:29:47.931821 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5922246-83ed-429e-82eb-f9fd7810d687" containerName="util" Nov 22 07:29:47 crc kubenswrapper[4929]: I1122 07:29:47.931957 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5922246-83ed-429e-82eb-f9fd7810d687" containerName="extract" Nov 22 07:29:47 crc kubenswrapper[4929]: I1122 07:29:47.932839 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-64f4bfcd74-twkf7" Nov 22 07:29:47 crc kubenswrapper[4929]: I1122 07:29:47.934667 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-th486" Nov 22 07:29:47 crc kubenswrapper[4929]: I1122 07:29:47.977392 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-64f4bfcd74-twkf7"] Nov 22 07:29:48 crc kubenswrapper[4929]: I1122 07:29:48.065876 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9mtt\" (UniqueName: \"kubernetes.io/projected/22cf71fc-be54-4a37-8663-2b9419232940-kube-api-access-b9mtt\") pod \"openstack-operator-controller-operator-64f4bfcd74-twkf7\" (UID: \"22cf71fc-be54-4a37-8663-2b9419232940\") " pod="openstack-operators/openstack-operator-controller-operator-64f4bfcd74-twkf7" Nov 22 07:29:48 crc kubenswrapper[4929]: I1122 07:29:48.167573 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9mtt\" (UniqueName: \"kubernetes.io/projected/22cf71fc-be54-4a37-8663-2b9419232940-kube-api-access-b9mtt\") pod \"openstack-operator-controller-operator-64f4bfcd74-twkf7\" (UID: \"22cf71fc-be54-4a37-8663-2b9419232940\") " pod="openstack-operators/openstack-operator-controller-operator-64f4bfcd74-twkf7" Nov 22 07:29:48 crc kubenswrapper[4929]: I1122 07:29:48.194687 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9mtt\" (UniqueName: \"kubernetes.io/projected/22cf71fc-be54-4a37-8663-2b9419232940-kube-api-access-b9mtt\") pod \"openstack-operator-controller-operator-64f4bfcd74-twkf7\" (UID: \"22cf71fc-be54-4a37-8663-2b9419232940\") " pod="openstack-operators/openstack-operator-controller-operator-64f4bfcd74-twkf7" Nov 22 07:29:48 crc kubenswrapper[4929]: I1122 07:29:48.252626 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-64f4bfcd74-twkf7" Nov 22 07:29:48 crc kubenswrapper[4929]: I1122 07:29:48.537628 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-64f4bfcd74-twkf7"] Nov 22 07:29:49 crc kubenswrapper[4929]: I1122 07:29:49.230937 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-64f4bfcd74-twkf7" event={"ID":"22cf71fc-be54-4a37-8663-2b9419232940","Type":"ContainerStarted","Data":"6bea038094790fc92f0841a9d953179d5bef4666dbd77825c1cc8c7759b5958e"} Nov 22 07:29:53 crc kubenswrapper[4929]: I1122 07:29:53.259577 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-64f4bfcd74-twkf7" event={"ID":"22cf71fc-be54-4a37-8663-2b9419232940","Type":"ContainerStarted","Data":"4addfb0d4031019bf4479512a4920970301cdc9e74d6df01148ce8237de2e89b"} Nov 22 07:29:56 crc kubenswrapper[4929]: I1122 07:29:56.281811 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-64f4bfcd74-twkf7" event={"ID":"22cf71fc-be54-4a37-8663-2b9419232940","Type":"ContainerStarted","Data":"130d5409b601875034c1a51e75278548e5f0daa754c17845b3a8b5615f0c0192"} Nov 22 07:29:56 crc kubenswrapper[4929]: I1122 07:29:56.282129 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-64f4bfcd74-twkf7" Nov 22 07:29:56 crc kubenswrapper[4929]: I1122 07:29:56.318746 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-64f4bfcd74-twkf7" podStartSLOduration=2.119783433 podStartE2EDuration="9.31872453s" podCreationTimestamp="2025-11-22 07:29:47 +0000 UTC" firstStartedPulling="2025-11-22 07:29:48.552419207 +0000 UTC m=+1125.661873220" lastFinishedPulling="2025-11-22 07:29:55.751360304 +0000 UTC m=+1132.860814317" observedRunningTime="2025-11-22 07:29:56.314205276 +0000 UTC m=+1133.423659309" watchObservedRunningTime="2025-11-22 07:29:56.31872453 +0000 UTC m=+1133.428178553" Nov 22 07:29:57 crc kubenswrapper[4929]: I1122 07:29:57.289980 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-64f4bfcd74-twkf7" Nov 22 07:30:00 crc kubenswrapper[4929]: I1122 07:30:00.133649 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw"] Nov 22 07:30:00 crc kubenswrapper[4929]: I1122 07:30:00.134810 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw" Nov 22 07:30:00 crc kubenswrapper[4929]: I1122 07:30:00.138013 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 07:30:00 crc kubenswrapper[4929]: I1122 07:30:00.139578 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 07:30:00 crc kubenswrapper[4929]: I1122 07:30:00.146284 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw"] Nov 22 07:30:00 crc kubenswrapper[4929]: I1122 07:30:00.254107 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fr62z\" (UniqueName: \"kubernetes.io/projected/33bf4915-25d4-40b8-9082-9dff385e1f46-kube-api-access-fr62z\") pod \"collect-profiles-29396610-lkpxw\" (UID: \"33bf4915-25d4-40b8-9082-9dff385e1f46\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw" Nov 22 07:30:00 crc kubenswrapper[4929]: I1122 07:30:00.254268 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33bf4915-25d4-40b8-9082-9dff385e1f46-config-volume\") pod \"collect-profiles-29396610-lkpxw\" (UID: \"33bf4915-25d4-40b8-9082-9dff385e1f46\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw" Nov 22 07:30:00 crc kubenswrapper[4929]: I1122 07:30:00.254312 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33bf4915-25d4-40b8-9082-9dff385e1f46-secret-volume\") pod \"collect-profiles-29396610-lkpxw\" (UID: \"33bf4915-25d4-40b8-9082-9dff385e1f46\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw" Nov 22 07:30:00 crc kubenswrapper[4929]: I1122 07:30:00.355308 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33bf4915-25d4-40b8-9082-9dff385e1f46-config-volume\") pod \"collect-profiles-29396610-lkpxw\" (UID: \"33bf4915-25d4-40b8-9082-9dff385e1f46\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw" Nov 22 07:30:00 crc kubenswrapper[4929]: I1122 07:30:00.355361 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33bf4915-25d4-40b8-9082-9dff385e1f46-secret-volume\") pod \"collect-profiles-29396610-lkpxw\" (UID: \"33bf4915-25d4-40b8-9082-9dff385e1f46\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw" Nov 22 07:30:00 crc kubenswrapper[4929]: I1122 07:30:00.355411 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fr62z\" (UniqueName: \"kubernetes.io/projected/33bf4915-25d4-40b8-9082-9dff385e1f46-kube-api-access-fr62z\") pod \"collect-profiles-29396610-lkpxw\" (UID: \"33bf4915-25d4-40b8-9082-9dff385e1f46\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw" Nov 22 07:30:00 crc kubenswrapper[4929]: I1122 07:30:00.356339 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33bf4915-25d4-40b8-9082-9dff385e1f46-config-volume\") pod 
\"collect-profiles-29396610-lkpxw\" (UID: \"33bf4915-25d4-40b8-9082-9dff385e1f46\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw" Nov 22 07:30:00 crc kubenswrapper[4929]: I1122 07:30:00.362303 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33bf4915-25d4-40b8-9082-9dff385e1f46-secret-volume\") pod \"collect-profiles-29396610-lkpxw\" (UID: \"33bf4915-25d4-40b8-9082-9dff385e1f46\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw" Nov 22 07:30:00 crc kubenswrapper[4929]: I1122 07:30:00.373266 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fr62z\" (UniqueName: \"kubernetes.io/projected/33bf4915-25d4-40b8-9082-9dff385e1f46-kube-api-access-fr62z\") pod \"collect-profiles-29396610-lkpxw\" (UID: \"33bf4915-25d4-40b8-9082-9dff385e1f46\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw" Nov 22 07:30:00 crc kubenswrapper[4929]: I1122 07:30:00.456758 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw" Nov 22 07:30:00 crc kubenswrapper[4929]: I1122 07:30:00.858316 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw"] Nov 22 07:30:01 crc kubenswrapper[4929]: I1122 07:30:01.310985 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw" event={"ID":"33bf4915-25d4-40b8-9082-9dff385e1f46","Type":"ContainerStarted","Data":"f63dab2d35f6105de938ebe7833e14068a260222ba52e5bb5ae3a7b92d6542ab"} Nov 22 07:30:01 crc kubenswrapper[4929]: I1122 07:30:01.311385 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw" event={"ID":"33bf4915-25d4-40b8-9082-9dff385e1f46","Type":"ContainerStarted","Data":"95d3399504b49b4ab6a0c4db5d65c96d2039bbbd806db127b47064b6a6428307"} Nov 22 07:30:02 crc kubenswrapper[4929]: I1122 07:30:02.319745 4929 generic.go:334] "Generic (PLEG): container finished" podID="33bf4915-25d4-40b8-9082-9dff385e1f46" containerID="f63dab2d35f6105de938ebe7833e14068a260222ba52e5bb5ae3a7b92d6542ab" exitCode=0 Nov 22 07:30:02 crc kubenswrapper[4929]: I1122 07:30:02.319812 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw" event={"ID":"33bf4915-25d4-40b8-9082-9dff385e1f46","Type":"ContainerDied","Data":"f63dab2d35f6105de938ebe7833e14068a260222ba52e5bb5ae3a7b92d6542ab"} Nov 22 07:30:03 crc kubenswrapper[4929]: I1122 07:30:03.612398 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw" Nov 22 07:30:03 crc kubenswrapper[4929]: I1122 07:30:03.700935 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fr62z\" (UniqueName: \"kubernetes.io/projected/33bf4915-25d4-40b8-9082-9dff385e1f46-kube-api-access-fr62z\") pod \"33bf4915-25d4-40b8-9082-9dff385e1f46\" (UID: \"33bf4915-25d4-40b8-9082-9dff385e1f46\") " Nov 22 07:30:03 crc kubenswrapper[4929]: I1122 07:30:03.701000 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33bf4915-25d4-40b8-9082-9dff385e1f46-secret-volume\") pod \"33bf4915-25d4-40b8-9082-9dff385e1f46\" (UID: \"33bf4915-25d4-40b8-9082-9dff385e1f46\") " Nov 22 07:30:03 crc kubenswrapper[4929]: I1122 07:30:03.701059 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33bf4915-25d4-40b8-9082-9dff385e1f46-config-volume\") pod \"33bf4915-25d4-40b8-9082-9dff385e1f46\" (UID: \"33bf4915-25d4-40b8-9082-9dff385e1f46\") " Nov 22 07:30:03 crc kubenswrapper[4929]: I1122 07:30:03.701738 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33bf4915-25d4-40b8-9082-9dff385e1f46-config-volume" (OuterVolumeSpecName: "config-volume") pod "33bf4915-25d4-40b8-9082-9dff385e1f46" (UID: "33bf4915-25d4-40b8-9082-9dff385e1f46"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:30:03 crc kubenswrapper[4929]: I1122 07:30:03.707398 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33bf4915-25d4-40b8-9082-9dff385e1f46-kube-api-access-fr62z" (OuterVolumeSpecName: "kube-api-access-fr62z") pod "33bf4915-25d4-40b8-9082-9dff385e1f46" (UID: "33bf4915-25d4-40b8-9082-9dff385e1f46"). InnerVolumeSpecName "kube-api-access-fr62z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:30:03 crc kubenswrapper[4929]: I1122 07:30:03.707413 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33bf4915-25d4-40b8-9082-9dff385e1f46-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "33bf4915-25d4-40b8-9082-9dff385e1f46" (UID: "33bf4915-25d4-40b8-9082-9dff385e1f46"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:30:03 crc kubenswrapper[4929]: I1122 07:30:03.802928 4929 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/33bf4915-25d4-40b8-9082-9dff385e1f46-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 07:30:03 crc kubenswrapper[4929]: I1122 07:30:03.802969 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fr62z\" (UniqueName: \"kubernetes.io/projected/33bf4915-25d4-40b8-9082-9dff385e1f46-kube-api-access-fr62z\") on node \"crc\" DevicePath \"\"" Nov 22 07:30:03 crc kubenswrapper[4929]: I1122 07:30:03.802983 4929 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/33bf4915-25d4-40b8-9082-9dff385e1f46-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 07:30:04 crc kubenswrapper[4929]: I1122 07:30:04.333720 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw" event={"ID":"33bf4915-25d4-40b8-9082-9dff385e1f46","Type":"ContainerDied","Data":"95d3399504b49b4ab6a0c4db5d65c96d2039bbbd806db127b47064b6a6428307"} Nov 22 07:30:04 crc kubenswrapper[4929]: I1122 07:30:04.333768 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="95d3399504b49b4ab6a0c4db5d65c96d2039bbbd806db127b47064b6a6428307" Nov 22 07:30:04 crc kubenswrapper[4929]: I1122 07:30:04.333875 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.813436 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-75fb479bcc-hlxc8"] Nov 22 07:30:32 crc kubenswrapper[4929]: E1122 07:30:32.814097 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33bf4915-25d4-40b8-9082-9dff385e1f46" containerName="collect-profiles" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.814110 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="33bf4915-25d4-40b8-9082-9dff385e1f46" containerName="collect-profiles" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.814242 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="33bf4915-25d4-40b8-9082-9dff385e1f46" containerName="collect-profiles" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.814819 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-hlxc8" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.817625 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-9bkwn" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.820449 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6498cbf48f-h7h67"] Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.821752 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h7h67" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.825463 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-wvnvq" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.830051 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-75fb479bcc-hlxc8"] Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.840051 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6498cbf48f-h7h67"] Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.847912 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-767ccfd65f-zldhw"] Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.849239 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-zldhw" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.850831 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-xwgbr" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.851677 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-7969689c84-b2l86"] Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.853054 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-7969689c84-b2l86" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.854461 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-xf7nn" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.879356 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-56f54d6746-wg5hx"] Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.880567 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-wg5hx" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.882331 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-zcbjz" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.899365 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-7969689c84-b2l86"] Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.907695 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-767ccfd65f-zldhw"] Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.922345 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-598f69df5d-9tqpv"] Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.923373 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-9tqpv" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.927141 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-7rzjs" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.927987 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4"] Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.929016 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.931836 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4pp9\" (UniqueName: \"kubernetes.io/projected/aa0e6902-572d-44e6-9a5b-efbbbf188e01-kube-api-access-p4pp9\") pod \"barbican-operator-controller-manager-75fb479bcc-hlxc8\" (UID: \"aa0e6902-572d-44e6-9a5b-efbbbf188e01\") " pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-hlxc8" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.931899 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wtwg\" (UniqueName: \"kubernetes.io/projected/f90b6c4a-a83b-48df-95ab-0240fee75881-kube-api-access-9wtwg\") pod \"cinder-operator-controller-manager-6498cbf48f-h7h67\" (UID: \"f90b6c4a-a83b-48df-95ab-0240fee75881\") " pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h7h67" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.933381 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.933646 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-ndb4g" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.940285 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-99b499f4-q5bvk"] Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.941450 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-q5bvk" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.946660 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-5j8fz" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.951933 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-56f54d6746-wg5hx"] Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.958532 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7454b96578-rrdbn"] Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.963679 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-rrdbn" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.966763 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-k7qb7" Nov 22 07:30:32 crc kubenswrapper[4929]: I1122 07:30:32.982800 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-58f887965d-9lr2t"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.009443 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-54b5986bb8-dzhvl"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.009693 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58f887965d-9lr2t" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.024018 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-dzhvl" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.027073 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-77qms" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.027108 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78bd47f458-knndh"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.029324 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-knndh" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.029511 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-jpkbn" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.035123 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8tn4b\" (UniqueName: \"kubernetes.io/projected/f0d8223c-7551-48b6-8a56-fd43fb266534-kube-api-access-8tn4b\") pod \"glance-operator-controller-manager-7969689c84-b2l86\" (UID: \"f0d8223c-7551-48b6-8a56-fd43fb266534\") " pod="openstack-operators/glance-operator-controller-manager-7969689c84-b2l86" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.035159 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8cgq\" (UniqueName: \"kubernetes.io/projected/80a127c3-bf01-4a74-8f00-9226fbb485cd-kube-api-access-b8cgq\") pod \"horizon-operator-controller-manager-598f69df5d-9tqpv\" (UID: \"80a127c3-bf01-4a74-8f00-9226fbb485cd\") " pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-9tqpv" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.035193 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ln4js\" (UniqueName: \"kubernetes.io/projected/3db96ba7-8db5-4603-bab2-bafe30ce4874-kube-api-access-ln4js\") pod \"designate-operator-controller-manager-767ccfd65f-zldhw\" (UID: \"3db96ba7-8db5-4603-bab2-bafe30ce4874\") " pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-zldhw" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.035240 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-p4pp9\" (UniqueName: \"kubernetes.io/projected/aa0e6902-572d-44e6-9a5b-efbbbf188e01-kube-api-access-p4pp9\") pod \"barbican-operator-controller-manager-75fb479bcc-hlxc8\" (UID: \"aa0e6902-572d-44e6-9a5b-efbbbf188e01\") " pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-hlxc8" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.035262 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d45zx\" (UniqueName: \"kubernetes.io/projected/1cf9bdb5-5eec-4cf4-a7ef-021a2cc6e59f-kube-api-access-d45zx\") pod \"heat-operator-controller-manager-56f54d6746-wg5hx\" (UID: \"1cf9bdb5-5eec-4cf4-a7ef-021a2cc6e59f\") " pod="openstack-operators/heat-operator-controller-manager-56f54d6746-wg5hx" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.035283 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ccb9635f-f142-4835-99be-509914bbeb1c-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-62kf4\" (UID: \"ccb9635f-f142-4835-99be-509914bbeb1c\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.035318 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wtwg\" (UniqueName: \"kubernetes.io/projected/f90b6c4a-a83b-48df-95ab-0240fee75881-kube-api-access-9wtwg\") pod \"cinder-operator-controller-manager-6498cbf48f-h7h67\" (UID: \"f90b6c4a-a83b-48df-95ab-0240fee75881\") " pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h7h67" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.035361 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbqcz\" (UniqueName: \"kubernetes.io/projected/ccb9635f-f142-4835-99be-509914bbeb1c-kube-api-access-fbqcz\") pod \"infra-operator-controller-manager-6dd8864d7c-62kf4\" (UID: \"ccb9635f-f142-4835-99be-509914bbeb1c\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.035800 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87ng9\" (UniqueName: \"kubernetes.io/projected/e8422098-8e21-47a4-9a93-9e3eed95dde7-kube-api-access-87ng9\") pod \"neutron-operator-controller-manager-78bd47f458-knndh\" (UID: \"e8422098-8e21-47a4-9a93-9e3eed95dde7\") " pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-knndh" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.070552 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-xr2cf" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.077982 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4pp9\" (UniqueName: \"kubernetes.io/projected/aa0e6902-572d-44e6-9a5b-efbbbf188e01-kube-api-access-p4pp9\") pod \"barbican-operator-controller-manager-75fb479bcc-hlxc8\" (UID: \"aa0e6902-572d-44e6-9a5b-efbbbf188e01\") " pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-hlxc8" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.086300 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-598f69df5d-9tqpv"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 
07:30:33.087166 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wtwg\" (UniqueName: \"kubernetes.io/projected/f90b6c4a-a83b-48df-95ab-0240fee75881-kube-api-access-9wtwg\") pod \"cinder-operator-controller-manager-6498cbf48f-h7h67\" (UID: \"f90b6c4a-a83b-48df-95ab-0240fee75881\") " pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h7h67" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.094121 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.124298 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-99b499f4-q5bvk"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.142574 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87ng9\" (UniqueName: \"kubernetes.io/projected/e8422098-8e21-47a4-9a93-9e3eed95dde7-kube-api-access-87ng9\") pod \"neutron-operator-controller-manager-78bd47f458-knndh\" (UID: \"e8422098-8e21-47a4-9a93-9e3eed95dde7\") " pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-knndh" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.142614 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jb5pm\" (UniqueName: \"kubernetes.io/projected/3713fa6e-3e99-41b8-bfff-22c91a622841-kube-api-access-jb5pm\") pod \"manila-operator-controller-manager-58f887965d-9lr2t\" (UID: \"3713fa6e-3e99-41b8-bfff-22c91a622841\") " pod="openstack-operators/manila-operator-controller-manager-58f887965d-9lr2t" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.142642 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8tn4b\" (UniqueName: \"kubernetes.io/projected/f0d8223c-7551-48b6-8a56-fd43fb266534-kube-api-access-8tn4b\") pod \"glance-operator-controller-manager-7969689c84-b2l86\" (UID: \"f0d8223c-7551-48b6-8a56-fd43fb266534\") " pod="openstack-operators/glance-operator-controller-manager-7969689c84-b2l86" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.142662 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8cgq\" (UniqueName: \"kubernetes.io/projected/80a127c3-bf01-4a74-8f00-9226fbb485cd-kube-api-access-b8cgq\") pod \"horizon-operator-controller-manager-598f69df5d-9tqpv\" (UID: \"80a127c3-bf01-4a74-8f00-9226fbb485cd\") " pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-9tqpv" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.142685 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xm9nk\" (UniqueName: \"kubernetes.io/projected/79972b49-f1f3-482e-88ac-63f5e249d537-kube-api-access-xm9nk\") pod \"keystone-operator-controller-manager-7454b96578-rrdbn\" (UID: \"79972b49-f1f3-482e-88ac-63f5e249d537\") " pod="openstack-operators/keystone-operator-controller-manager-7454b96578-rrdbn" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.142704 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ln4js\" (UniqueName: \"kubernetes.io/projected/3db96ba7-8db5-4603-bab2-bafe30ce4874-kube-api-access-ln4js\") pod \"designate-operator-controller-manager-767ccfd65f-zldhw\" (UID: \"3db96ba7-8db5-4603-bab2-bafe30ce4874\") " 
pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-zldhw" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.142726 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d45zx\" (UniqueName: \"kubernetes.io/projected/1cf9bdb5-5eec-4cf4-a7ef-021a2cc6e59f-kube-api-access-d45zx\") pod \"heat-operator-controller-manager-56f54d6746-wg5hx\" (UID: \"1cf9bdb5-5eec-4cf4-a7ef-021a2cc6e59f\") " pod="openstack-operators/heat-operator-controller-manager-56f54d6746-wg5hx" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.142750 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ccb9635f-f142-4835-99be-509914bbeb1c-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-62kf4\" (UID: \"ccb9635f-f142-4835-99be-509914bbeb1c\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.142784 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stx8k\" (UniqueName: \"kubernetes.io/projected/696093b1-6e6c-4f6d-915b-f1b5481834a5-kube-api-access-stx8k\") pod \"mariadb-operator-controller-manager-54b5986bb8-dzhvl\" (UID: \"696093b1-6e6c-4f6d-915b-f1b5481834a5\") " pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-dzhvl" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.142807 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbqcz\" (UniqueName: \"kubernetes.io/projected/ccb9635f-f142-4835-99be-509914bbeb1c-kube-api-access-fbqcz\") pod \"infra-operator-controller-manager-6dd8864d7c-62kf4\" (UID: \"ccb9635f-f142-4835-99be-509914bbeb1c\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.142827 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hspn\" (UniqueName: \"kubernetes.io/projected/7ec24093-c914-475e-86b9-2c4ba96791ff-kube-api-access-6hspn\") pod \"ironic-operator-controller-manager-99b499f4-q5bvk\" (UID: \"7ec24093-c914-475e-86b9-2c4ba96791ff\") " pod="openstack-operators/ironic-operator-controller-manager-99b499f4-q5bvk" Nov 22 07:30:33 crc kubenswrapper[4929]: E1122 07:30:33.143579 4929 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 22 07:30:33 crc kubenswrapper[4929]: E1122 07:30:33.143634 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ccb9635f-f142-4835-99be-509914bbeb1c-cert podName:ccb9635f-f142-4835-99be-509914bbeb1c nodeName:}" failed. No retries permitted until 2025-11-22 07:30:33.643615713 +0000 UTC m=+1170.753069726 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ccb9635f-f142-4835-99be-509914bbeb1c-cert") pod "infra-operator-controller-manager-6dd8864d7c-62kf4" (UID: "ccb9635f-f142-4835-99be-509914bbeb1c") : secret "infra-operator-webhook-server-cert" not found Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.145273 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-cfbb9c588-ckg8c"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.146528 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-ckg8c" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.148631 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-hlxc8" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.166828 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h7h67" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.167003 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-z765g" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.185094 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-54b5986bb8-dzhvl"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.205124 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ln4js\" (UniqueName: \"kubernetes.io/projected/3db96ba7-8db5-4603-bab2-bafe30ce4874-kube-api-access-ln4js\") pod \"designate-operator-controller-manager-767ccfd65f-zldhw\" (UID: \"3db96ba7-8db5-4603-bab2-bafe30ce4874\") " pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-zldhw" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.215041 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbqcz\" (UniqueName: \"kubernetes.io/projected/ccb9635f-f142-4835-99be-509914bbeb1c-kube-api-access-fbqcz\") pod \"infra-operator-controller-manager-6dd8864d7c-62kf4\" (UID: \"ccb9635f-f142-4835-99be-509914bbeb1c\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.241590 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d45zx\" (UniqueName: \"kubernetes.io/projected/1cf9bdb5-5eec-4cf4-a7ef-021a2cc6e59f-kube-api-access-d45zx\") pod \"heat-operator-controller-manager-56f54d6746-wg5hx\" (UID: \"1cf9bdb5-5eec-4cf4-a7ef-021a2cc6e59f\") " pod="openstack-operators/heat-operator-controller-manager-56f54d6746-wg5hx" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.242040 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8tn4b\" (UniqueName: \"kubernetes.io/projected/f0d8223c-7551-48b6-8a56-fd43fb266534-kube-api-access-8tn4b\") pod \"glance-operator-controller-manager-7969689c84-b2l86\" (UID: \"f0d8223c-7551-48b6-8a56-fd43fb266534\") " pod="openstack-operators/glance-operator-controller-manager-7969689c84-b2l86" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.242807 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87ng9\" (UniqueName: \"kubernetes.io/projected/e8422098-8e21-47a4-9a93-9e3eed95dde7-kube-api-access-87ng9\") pod \"neutron-operator-controller-manager-78bd47f458-knndh\" (UID: \"e8422098-8e21-47a4-9a93-9e3eed95dde7\") " pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-knndh" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.243560 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hspn\" (UniqueName: \"kubernetes.io/projected/7ec24093-c914-475e-86b9-2c4ba96791ff-kube-api-access-6hspn\") pod \"ironic-operator-controller-manager-99b499f4-q5bvk\" (UID: 
\"7ec24093-c914-475e-86b9-2c4ba96791ff\") " pod="openstack-operators/ironic-operator-controller-manager-99b499f4-q5bvk" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.243614 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jb5pm\" (UniqueName: \"kubernetes.io/projected/3713fa6e-3e99-41b8-bfff-22c91a622841-kube-api-access-jb5pm\") pod \"manila-operator-controller-manager-58f887965d-9lr2t\" (UID: \"3713fa6e-3e99-41b8-bfff-22c91a622841\") " pod="openstack-operators/manila-operator-controller-manager-58f887965d-9lr2t" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.243667 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xm9nk\" (UniqueName: \"kubernetes.io/projected/79972b49-f1f3-482e-88ac-63f5e249d537-kube-api-access-xm9nk\") pod \"keystone-operator-controller-manager-7454b96578-rrdbn\" (UID: \"79972b49-f1f3-482e-88ac-63f5e249d537\") " pod="openstack-operators/keystone-operator-controller-manager-7454b96578-rrdbn" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.243755 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stx8k\" (UniqueName: \"kubernetes.io/projected/696093b1-6e6c-4f6d-915b-f1b5481834a5-kube-api-access-stx8k\") pod \"mariadb-operator-controller-manager-54b5986bb8-dzhvl\" (UID: \"696093b1-6e6c-4f6d-915b-f1b5481834a5\") " pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-dzhvl" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.253154 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8cgq\" (UniqueName: \"kubernetes.io/projected/80a127c3-bf01-4a74-8f00-9226fbb485cd-kube-api-access-b8cgq\") pod \"horizon-operator-controller-manager-598f69df5d-9tqpv\" (UID: \"80a127c3-bf01-4a74-8f00-9226fbb485cd\") " pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-9tqpv" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.256402 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-4ssf8"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.276473 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-4ssf8" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.289077 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-7hp2b" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.295277 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-cfbb9c588-ckg8c"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.296051 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xm9nk\" (UniqueName: \"kubernetes.io/projected/79972b49-f1f3-482e-88ac-63f5e249d537-kube-api-access-xm9nk\") pod \"keystone-operator-controller-manager-7454b96578-rrdbn\" (UID: \"79972b49-f1f3-482e-88ac-63f5e249d537\") " pod="openstack-operators/keystone-operator-controller-manager-7454b96578-rrdbn" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.306079 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hspn\" (UniqueName: \"kubernetes.io/projected/7ec24093-c914-475e-86b9-2c4ba96791ff-kube-api-access-6hspn\") pod \"ironic-operator-controller-manager-99b499f4-q5bvk\" (UID: \"7ec24093-c914-475e-86b9-2c4ba96791ff\") " pod="openstack-operators/ironic-operator-controller-manager-99b499f4-q5bvk" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.312726 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jb5pm\" (UniqueName: \"kubernetes.io/projected/3713fa6e-3e99-41b8-bfff-22c91a622841-kube-api-access-jb5pm\") pod \"manila-operator-controller-manager-58f887965d-9lr2t\" (UID: \"3713fa6e-3e99-41b8-bfff-22c91a622841\") " pod="openstack-operators/manila-operator-controller-manager-58f887965d-9lr2t" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.313082 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-q5bvk" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.327839 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stx8k\" (UniqueName: \"kubernetes.io/projected/696093b1-6e6c-4f6d-915b-f1b5481834a5-kube-api-access-stx8k\") pod \"mariadb-operator-controller-manager-54b5986bb8-dzhvl\" (UID: \"696093b1-6e6c-4f6d-915b-f1b5481834a5\") " pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-dzhvl" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.337899 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-4ssf8"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.338154 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-rrdbn" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.367915 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58f887965d-9lr2t"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.379981 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnlnz\" (UniqueName: \"kubernetes.io/projected/bf7b0c38-b9e7-4036-a9c0-9494b65c8714-kube-api-access-dnlnz\") pod \"octavia-operator-controller-manager-54cfbf4c7d-4ssf8\" (UID: \"bf7b0c38-b9e7-4036-a9c0-9494b65c8714\") " pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-4ssf8" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.380054 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftgxz\" (UniqueName: \"kubernetes.io/projected/5eb78153-a103-4885-b710-406198b25403-kube-api-access-ftgxz\") pod \"nova-operator-controller-manager-cfbb9c588-ckg8c\" (UID: \"5eb78153-a103-4885-b710-406198b25403\") " pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-ckg8c" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.394042 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58f887965d-9lr2t" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.409291 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78bd47f458-knndh"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.420285 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-54fc5f65b7-2lnbz"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.421420 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-2lnbz" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.431547 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-r4mzn" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.435486 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7454b96578-rrdbn"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.435760 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-dzhvl" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.455181 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.456583 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.459638 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-knndh" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.459890 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-krshv" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.460057 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-54fc5f65b7-2lnbz"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.460244 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.467801 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b797b8dff-m7z2n"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.468948 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-m7z2n" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.471459 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-2mfxl" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.481252 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnlnz\" (UniqueName: \"kubernetes.io/projected/bf7b0c38-b9e7-4036-a9c0-9494b65c8714-kube-api-access-dnlnz\") pod \"octavia-operator-controller-manager-54cfbf4c7d-4ssf8\" (UID: \"bf7b0c38-b9e7-4036-a9c0-9494b65c8714\") " pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-4ssf8" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.481288 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ss8tb\" (UniqueName: \"kubernetes.io/projected/38070268-a2fc-4876-a8c2-81dcd0bc7f28-kube-api-access-ss8tb\") pod \"ovn-operator-controller-manager-54fc5f65b7-2lnbz\" (UID: \"38070268-a2fc-4876-a8c2-81dcd0bc7f28\") " pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-2lnbz" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.481435 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftgxz\" (UniqueName: \"kubernetes.io/projected/5eb78153-a103-4885-b710-406198b25403-kube-api-access-ftgxz\") pod \"nova-operator-controller-manager-cfbb9c588-ckg8c\" (UID: \"5eb78153-a103-4885-b710-406198b25403\") " pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-ckg8c" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.495032 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-zldhw" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.500957 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-d656998f4-2trxf"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.502023 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d656998f4-2trxf" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.505878 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-q9fff" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.507274 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.512201 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-7969689c84-b2l86" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.518281 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b797b8dff-m7z2n"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.521356 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnlnz\" (UniqueName: \"kubernetes.io/projected/bf7b0c38-b9e7-4036-a9c0-9494b65c8714-kube-api-access-dnlnz\") pod \"octavia-operator-controller-manager-54cfbf4c7d-4ssf8\" (UID: \"bf7b0c38-b9e7-4036-a9c0-9494b65c8714\") " pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-4ssf8" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.524176 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-wg5hx" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.524773 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftgxz\" (UniqueName: \"kubernetes.io/projected/5eb78153-a103-4885-b710-406198b25403-kube-api-access-ftgxz\") pod \"nova-operator-controller-manager-cfbb9c588-ckg8c\" (UID: \"5eb78153-a103-4885-b710-406198b25403\") " pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-ckg8c" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.525234 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d656998f4-2trxf"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.551375 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-xh4kd"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.552556 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-xh4kd" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.562704 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-9tqpv" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.564769 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-wlj5c" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.565284 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-ckg8c" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.566353 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-xh4kd"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.571501 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-b4c496f69-g64vx"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.572509 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-b4c496f69-g64vx" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.580117 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-dj77f" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.583756 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tw8gg\" (UniqueName: \"kubernetes.io/projected/9edcc36b-ecc0-4d9a-b549-e370b4a5e018-kube-api-access-tw8gg\") pod \"placement-operator-controller-manager-5b797b8dff-m7z2n\" (UID: \"9edcc36b-ecc0-4d9a-b549-e370b4a5e018\") " pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-m7z2n" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.583788 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kl9v7\" (UniqueName: \"kubernetes.io/projected/edea9302-eeed-48b3-a75f-170198cd1b2f-kube-api-access-kl9v7\") pod \"swift-operator-controller-manager-d656998f4-2trxf\" (UID: \"edea9302-eeed-48b3-a75f-170198cd1b2f\") " pod="openstack-operators/swift-operator-controller-manager-d656998f4-2trxf" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.583814 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d9191fcc-97a0-4e23-bc96-89dfbd474f25-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz\" (UID: \"d9191fcc-97a0-4e23-bc96-89dfbd474f25\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.583847 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9gz8\" (UniqueName: \"kubernetes.io/projected/d9191fcc-97a0-4e23-bc96-89dfbd474f25-kube-api-access-z9gz8\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz\" (UID: \"d9191fcc-97a0-4e23-bc96-89dfbd474f25\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.583877 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pmrjh\" (UniqueName: \"kubernetes.io/projected/027a936c-fba7-49af-b9b7-e849b356c87c-kube-api-access-pmrjh\") pod \"telemetry-operator-controller-manager-6d4bf84b58-xh4kd\" (UID: \"027a936c-fba7-49af-b9b7-e849b356c87c\") " pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-xh4kd" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.583906 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ss8tb\" (UniqueName: 
\"kubernetes.io/projected/38070268-a2fc-4876-a8c2-81dcd0bc7f28-kube-api-access-ss8tb\") pod \"ovn-operator-controller-manager-54fc5f65b7-2lnbz\" (UID: \"38070268-a2fc-4876-a8c2-81dcd0bc7f28\") " pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-2lnbz" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.598710 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-b4c496f69-g64vx"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.671392 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-4ssf8" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.673267 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ss8tb\" (UniqueName: \"kubernetes.io/projected/38070268-a2fc-4876-a8c2-81dcd0bc7f28-kube-api-access-ss8tb\") pod \"ovn-operator-controller-manager-54fc5f65b7-2lnbz\" (UID: \"38070268-a2fc-4876-a8c2-81dcd0bc7f28\") " pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-2lnbz" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.685733 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kl9v7\" (UniqueName: \"kubernetes.io/projected/edea9302-eeed-48b3-a75f-170198cd1b2f-kube-api-access-kl9v7\") pod \"swift-operator-controller-manager-d656998f4-2trxf\" (UID: \"edea9302-eeed-48b3-a75f-170198cd1b2f\") " pod="openstack-operators/swift-operator-controller-manager-d656998f4-2trxf" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.685791 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d9191fcc-97a0-4e23-bc96-89dfbd474f25-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz\" (UID: \"d9191fcc-97a0-4e23-bc96-89dfbd474f25\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.685830 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkql9\" (UniqueName: \"kubernetes.io/projected/91edab4b-0002-4a85-9ab9-9475a07e0be4-kube-api-access-gkql9\") pod \"test-operator-controller-manager-b4c496f69-g64vx\" (UID: \"91edab4b-0002-4a85-9ab9-9475a07e0be4\") " pod="openstack-operators/test-operator-controller-manager-b4c496f69-g64vx" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.685872 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9gz8\" (UniqueName: \"kubernetes.io/projected/d9191fcc-97a0-4e23-bc96-89dfbd474f25-kube-api-access-z9gz8\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz\" (UID: \"d9191fcc-97a0-4e23-bc96-89dfbd474f25\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.685899 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pmrjh\" (UniqueName: \"kubernetes.io/projected/027a936c-fba7-49af-b9b7-e849b356c87c-kube-api-access-pmrjh\") pod \"telemetry-operator-controller-manager-6d4bf84b58-xh4kd\" (UID: \"027a936c-fba7-49af-b9b7-e849b356c87c\") " pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-xh4kd" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.685971 4929 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ccb9635f-f142-4835-99be-509914bbeb1c-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-62kf4\" (UID: \"ccb9635f-f142-4835-99be-509914bbeb1c\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.685994 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tw8gg\" (UniqueName: \"kubernetes.io/projected/9edcc36b-ecc0-4d9a-b549-e370b4a5e018-kube-api-access-tw8gg\") pod \"placement-operator-controller-manager-5b797b8dff-m7z2n\" (UID: \"9edcc36b-ecc0-4d9a-b549-e370b4a5e018\") " pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-m7z2n" Nov 22 07:30:33 crc kubenswrapper[4929]: E1122 07:30:33.686572 4929 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 22 07:30:33 crc kubenswrapper[4929]: E1122 07:30:33.686617 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d9191fcc-97a0-4e23-bc96-89dfbd474f25-cert podName:d9191fcc-97a0-4e23-bc96-89dfbd474f25 nodeName:}" failed. No retries permitted until 2025-11-22 07:30:34.186599294 +0000 UTC m=+1171.296053307 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d9191fcc-97a0-4e23-bc96-89dfbd474f25-cert") pod "openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" (UID: "d9191fcc-97a0-4e23-bc96-89dfbd474f25") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.718564 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.723739 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kl9v7\" (UniqueName: \"kubernetes.io/projected/edea9302-eeed-48b3-a75f-170198cd1b2f-kube-api-access-kl9v7\") pod \"swift-operator-controller-manager-d656998f4-2trxf\" (UID: \"edea9302-eeed-48b3-a75f-170198cd1b2f\") " pod="openstack-operators/swift-operator-controller-manager-d656998f4-2trxf" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.724766 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.727038 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ccb9635f-f142-4835-99be-509914bbeb1c-cert\") pod \"infra-operator-controller-manager-6dd8864d7c-62kf4\" (UID: \"ccb9635f-f142-4835-99be-509914bbeb1c\") " pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.734395 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tw8gg\" (UniqueName: \"kubernetes.io/projected/9edcc36b-ecc0-4d9a-b549-e370b4a5e018-kube-api-access-tw8gg\") pod \"placement-operator-controller-manager-5b797b8dff-m7z2n\" (UID: \"9edcc36b-ecc0-4d9a-b549-e370b4a5e018\") " pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-m7z2n" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.744918 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-n4hct" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.746956 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9gz8\" (UniqueName: \"kubernetes.io/projected/d9191fcc-97a0-4e23-bc96-89dfbd474f25-kube-api-access-z9gz8\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz\" (UID: \"d9191fcc-97a0-4e23-bc96-89dfbd474f25\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.750428 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.763300 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-2lnbz" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.774066 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pmrjh\" (UniqueName: \"kubernetes.io/projected/027a936c-fba7-49af-b9b7-e849b356c87c-kube-api-access-pmrjh\") pod \"telemetry-operator-controller-manager-6d4bf84b58-xh4kd\" (UID: \"027a936c-fba7-49af-b9b7-e849b356c87c\") " pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-xh4kd" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.823527 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkql9\" (UniqueName: \"kubernetes.io/projected/91edab4b-0002-4a85-9ab9-9475a07e0be4-kube-api-access-gkql9\") pod \"test-operator-controller-manager-b4c496f69-g64vx\" (UID: \"91edab4b-0002-4a85-9ab9-9475a07e0be4\") " pod="openstack-operators/test-operator-controller-manager-b4c496f69-g64vx" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.823652 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwck2\" (UniqueName: \"kubernetes.io/projected/62c58af8-c121-43e3-b91c-524e2d2783bd-kube-api-access-cwck2\") pod \"watcher-operator-controller-manager-747b777d6b-wwnbh\" (UID: \"62c58af8-c121-43e3-b91c-524e2d2783bd\") " pod="openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.824351 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-m7z2n" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.839840 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d656998f4-2trxf" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.868993 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkql9\" (UniqueName: \"kubernetes.io/projected/91edab4b-0002-4a85-9ab9-9475a07e0be4-kube-api-access-gkql9\") pod \"test-operator-controller-manager-b4c496f69-g64vx\" (UID: \"91edab4b-0002-4a85-9ab9-9475a07e0be4\") " pod="openstack-operators/test-operator-controller-manager-b4c496f69-g64vx" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.886168 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.901539 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-xh4kd" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.909657 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6576b65d54-nl6qn"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.911011 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-6576b65d54-nl6qn" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.918625 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.918834 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-8htz7" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.920689 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6576b65d54-nl6qn"] Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.926537 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwck2\" (UniqueName: \"kubernetes.io/projected/62c58af8-c121-43e3-b91c-524e2d2783bd-kube-api-access-cwck2\") pod \"watcher-operator-controller-manager-747b777d6b-wwnbh\" (UID: \"62c58af8-c121-43e3-b91c-524e2d2783bd\") " pod="openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.960258 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwck2\" (UniqueName: \"kubernetes.io/projected/62c58af8-c121-43e3-b91c-524e2d2783bd-kube-api-access-cwck2\") pod \"watcher-operator-controller-manager-747b777d6b-wwnbh\" (UID: \"62c58af8-c121-43e3-b91c-524e2d2783bd\") " pod="openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh" Nov 22 07:30:33 crc kubenswrapper[4929]: I1122 07:30:33.976443 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-b4c496f69-g64vx" Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.014820 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-djv62"] Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.016584 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-djv62"] Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.016668 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-djv62" Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.019716 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-rqhsj" Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.032684 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbrf6\" (UniqueName: \"kubernetes.io/projected/0bc28b5a-7c1e-4252-828b-92057d3d6749-kube-api-access-nbrf6\") pod \"openstack-operator-controller-manager-6576b65d54-nl6qn\" (UID: \"0bc28b5a-7c1e-4252-828b-92057d3d6749\") " pod="openstack-operators/openstack-operator-controller-manager-6576b65d54-nl6qn" Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.032821 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0bc28b5a-7c1e-4252-828b-92057d3d6749-cert\") pod \"openstack-operator-controller-manager-6576b65d54-nl6qn\" (UID: \"0bc28b5a-7c1e-4252-828b-92057d3d6749\") " pod="openstack-operators/openstack-operator-controller-manager-6576b65d54-nl6qn" Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.046624 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-75fb479bcc-hlxc8"] Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.098830 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh" Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.133739 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0bc28b5a-7c1e-4252-828b-92057d3d6749-cert\") pod \"openstack-operator-controller-manager-6576b65d54-nl6qn\" (UID: \"0bc28b5a-7c1e-4252-828b-92057d3d6749\") " pod="openstack-operators/openstack-operator-controller-manager-6576b65d54-nl6qn" Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.133809 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4r99\" (UniqueName: \"kubernetes.io/projected/4683d9d2-95c8-4fa3-a710-9b03cce32d1f-kube-api-access-t4r99\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-djv62\" (UID: \"4683d9d2-95c8-4fa3-a710-9b03cce32d1f\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-djv62" Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.133852 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbrf6\" (UniqueName: \"kubernetes.io/projected/0bc28b5a-7c1e-4252-828b-92057d3d6749-kube-api-access-nbrf6\") pod \"openstack-operator-controller-manager-6576b65d54-nl6qn\" (UID: \"0bc28b5a-7c1e-4252-828b-92057d3d6749\") " pod="openstack-operators/openstack-operator-controller-manager-6576b65d54-nl6qn" Nov 22 07:30:34 crc kubenswrapper[4929]: E1122 07:30:34.134282 4929 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 22 07:30:34 crc kubenswrapper[4929]: E1122 07:30:34.134332 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0bc28b5a-7c1e-4252-828b-92057d3d6749-cert podName:0bc28b5a-7c1e-4252-828b-92057d3d6749 nodeName:}" failed. 
No retries permitted until 2025-11-22 07:30:34.63431446 +0000 UTC m=+1171.743768473 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0bc28b5a-7c1e-4252-828b-92057d3d6749-cert") pod "openstack-operator-controller-manager-6576b65d54-nl6qn" (UID: "0bc28b5a-7c1e-4252-828b-92057d3d6749") : secret "webhook-server-cert" not found Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.136721 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6498cbf48f-h7h67"] Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.154144 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbrf6\" (UniqueName: \"kubernetes.io/projected/0bc28b5a-7c1e-4252-828b-92057d3d6749-kube-api-access-nbrf6\") pod \"openstack-operator-controller-manager-6576b65d54-nl6qn\" (UID: \"0bc28b5a-7c1e-4252-828b-92057d3d6749\") " pod="openstack-operators/openstack-operator-controller-manager-6576b65d54-nl6qn" Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.183232 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-99b499f4-q5bvk"] Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.238114 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4r99\" (UniqueName: \"kubernetes.io/projected/4683d9d2-95c8-4fa3-a710-9b03cce32d1f-kube-api-access-t4r99\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-djv62\" (UID: \"4683d9d2-95c8-4fa3-a710-9b03cce32d1f\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-djv62" Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.238197 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d9191fcc-97a0-4e23-bc96-89dfbd474f25-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz\" (UID: \"d9191fcc-97a0-4e23-bc96-89dfbd474f25\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.242907 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d9191fcc-97a0-4e23-bc96-89dfbd474f25-cert\") pod \"openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz\" (UID: \"d9191fcc-97a0-4e23-bc96-89dfbd474f25\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.269953 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4r99\" (UniqueName: \"kubernetes.io/projected/4683d9d2-95c8-4fa3-a710-9b03cce32d1f-kube-api-access-t4r99\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-djv62\" (UID: \"4683d9d2-95c8-4fa3-a710-9b03cce32d1f\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-djv62" Nov 22 07:30:34 crc kubenswrapper[4929]: W1122 07:30:34.341525 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7ec24093_c914_475e_86b9_2c4ba96791ff.slice/crio-7cb8e3b930be7184f613926e18f97c3c76a7e322d282fbe753bfb64647dccc56 WatchSource:0}: Error finding container 7cb8e3b930be7184f613926e18f97c3c76a7e322d282fbe753bfb64647dccc56: Status 404 returned error can't find the container with id 
7cb8e3b930be7184f613926e18f97c3c76a7e322d282fbe753bfb64647dccc56 Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.393541 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.428077 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-djv62" Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.428569 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7454b96578-rrdbn"] Nov 22 07:30:34 crc kubenswrapper[4929]: W1122 07:30:34.510680 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod79972b49_f1f3_482e_88ac_63f5e249d537.slice/crio-db49625ed76c02981a541d25acb969d42fb7c49c9c134ae4aeb734f2377ebef0 WatchSource:0}: Error finding container db49625ed76c02981a541d25acb969d42fb7c49c9c134ae4aeb734f2377ebef0: Status 404 returned error can't find the container with id db49625ed76c02981a541d25acb969d42fb7c49c9c134ae4aeb734f2377ebef0 Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.587984 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-hlxc8" event={"ID":"aa0e6902-572d-44e6-9a5b-efbbbf188e01","Type":"ContainerStarted","Data":"288442c7af0e7e1b8c235071e8fe79bbdfad9281be9d2e943aca14dc0cf9f60f"} Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.590068 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-rrdbn" event={"ID":"79972b49-f1f3-482e-88ac-63f5e249d537","Type":"ContainerStarted","Data":"db49625ed76c02981a541d25acb969d42fb7c49c9c134ae4aeb734f2377ebef0"} Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.591415 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-q5bvk" event={"ID":"7ec24093-c914-475e-86b9-2c4ba96791ff","Type":"ContainerStarted","Data":"7cb8e3b930be7184f613926e18f97c3c76a7e322d282fbe753bfb64647dccc56"} Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.597352 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h7h67" event={"ID":"f90b6c4a-a83b-48df-95ab-0240fee75881","Type":"ContainerStarted","Data":"96ac950734be6d154d090456b6fd57a1ee45525a9ce4cb0915dae2d264635c03"} Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.646918 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0bc28b5a-7c1e-4252-828b-92057d3d6749-cert\") pod \"openstack-operator-controller-manager-6576b65d54-nl6qn\" (UID: \"0bc28b5a-7c1e-4252-828b-92057d3d6749\") " pod="openstack-operators/openstack-operator-controller-manager-6576b65d54-nl6qn" Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.665771 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0bc28b5a-7c1e-4252-828b-92057d3d6749-cert\") pod \"openstack-operator-controller-manager-6576b65d54-nl6qn\" (UID: \"0bc28b5a-7c1e-4252-828b-92057d3d6749\") " pod="openstack-operators/openstack-operator-controller-manager-6576b65d54-nl6qn" Nov 22 07:30:34 crc kubenswrapper[4929]: I1122 07:30:34.905454 
4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-6576b65d54-nl6qn" Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.265178 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58f887965d-9lr2t"] Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.271498 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-78bd47f458-knndh"] Nov 22 07:30:35 crc kubenswrapper[4929]: W1122 07:30:35.280254 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode8422098_8e21_47a4_9a93_9e3eed95dde7.slice/crio-9909f65a83be31396dcfdce751bfce5a828dcf281e304c030df57a92a29bfde8 WatchSource:0}: Error finding container 9909f65a83be31396dcfdce751bfce5a828dcf281e304c030df57a92a29bfde8: Status 404 returned error can't find the container with id 9909f65a83be31396dcfdce751bfce5a828dcf281e304c030df57a92a29bfde8 Nov 22 07:30:35 crc kubenswrapper[4929]: W1122 07:30:35.281270 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3713fa6e_3e99_41b8_bfff_22c91a622841.slice/crio-11c23b3c6e4d654cac26605a2103625a7dd19c34010aaf16fddb42baca7a4f3e WatchSource:0}: Error finding container 11c23b3c6e4d654cac26605a2103625a7dd19c34010aaf16fddb42baca7a4f3e: Status 404 returned error can't find the container with id 11c23b3c6e4d654cac26605a2103625a7dd19c34010aaf16fddb42baca7a4f3e Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.294457 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-54b5986bb8-dzhvl"] Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.299718 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-7969689c84-b2l86"] Nov 22 07:30:35 crc kubenswrapper[4929]: W1122 07:30:35.304487 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod696093b1_6e6c_4f6d_915b_f1b5481834a5.slice/crio-ec2b924dfbca45428727954a3a8ee695575d56b3a664890ccc771348ced2b8fa WatchSource:0}: Error finding container ec2b924dfbca45428727954a3a8ee695575d56b3a664890ccc771348ced2b8fa: Status 404 returned error can't find the container with id ec2b924dfbca45428727954a3a8ee695575d56b3a664890ccc771348ced2b8fa Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.618497 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-knndh" event={"ID":"e8422098-8e21-47a4-9a93-9e3eed95dde7","Type":"ContainerStarted","Data":"9909f65a83be31396dcfdce751bfce5a828dcf281e304c030df57a92a29bfde8"} Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.627433 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7969689c84-b2l86" event={"ID":"f0d8223c-7551-48b6-8a56-fd43fb266534","Type":"ContainerStarted","Data":"13953e695ed9514f969c96a800b0c0a2518b36fea8ca538f86e04811292eee76"} Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.645555 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58f887965d-9lr2t" 
event={"ID":"3713fa6e-3e99-41b8-bfff-22c91a622841","Type":"ContainerStarted","Data":"11c23b3c6e4d654cac26605a2103625a7dd19c34010aaf16fddb42baca7a4f3e"} Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.651417 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-dzhvl" event={"ID":"696093b1-6e6c-4f6d-915b-f1b5481834a5","Type":"ContainerStarted","Data":"ec2b924dfbca45428727954a3a8ee695575d56b3a664890ccc771348ced2b8fa"} Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.664984 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-767ccfd65f-zldhw"] Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.675013 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-598f69df5d-9tqpv"] Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.720191 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-56f54d6746-wg5hx"] Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.738701 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-54fc5f65b7-2lnbz"] Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.749054 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-4ssf8"] Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.759470 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b797b8dff-m7z2n"] Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.769568 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d656998f4-2trxf"] Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.942452 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-b4c496f69-g64vx"] Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.957349 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-xh4kd"] Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.959871 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh"] Nov 22 07:30:35 crc kubenswrapper[4929]: W1122 07:30:35.961022 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91edab4b_0002_4a85_9ab9_9475a07e0be4.slice/crio-358b13a41608ccee9b4721bcaa2ebe7157c7fe0a7df85a6cda60eeed1d32a40d WatchSource:0}: Error finding container 358b13a41608ccee9b4721bcaa2ebe7157c7fe0a7df85a6cda60eeed1d32a40d: Status 404 returned error can't find the container with id 358b13a41608ccee9b4721bcaa2ebe7157c7fe0a7df85a6cda60eeed1d32a40d Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.970869 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz"] Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.976244 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-cfbb9c588-ckg8c"] Nov 22 07:30:35 crc kubenswrapper[4929]: E1122 07:30:35.980847 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:38.102.83.196:5001/openstack-k8s-operators/watcher-operator:05df33742087a9ad8e8dd92626b1730a09013b29,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cwck2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-747b777d6b-wwnbh_openstack-operators(62c58af8-c121-43e3-b91c-524e2d2783bd): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 07:30:35 crc kubenswrapper[4929]: E1122 07:30:35.989143 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 
--leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT,Value:
quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IM
AGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstac
k-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:qu
ay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-z9gz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz_openstack-operators(d9191fcc-97a0-4e23-bc96-89dfbd474f25): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 07:30:35 crc kubenswrapper[4929]: I1122 07:30:35.993021 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4"] Nov 22 07:30:36 crc kubenswrapper[4929]: I1122 07:30:36.017178 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6576b65d54-nl6qn"] Nov 22 07:30:36 crc kubenswrapper[4929]: I1122 07:30:36.030015 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-djv62"] Nov 22 07:30:36 crc kubenswrapper[4929]: E1122 07:30:36.038188 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ftgxz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-cfbb9c588-ckg8c_openstack-operators(5eb78153-a103-4885-b710-406198b25403): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 07:30:36 crc kubenswrapper[4929]: W1122 07:30:36.051629 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0bc28b5a_7c1e_4252_828b_92057d3d6749.slice/crio-c9c8b1f9dc53f2a48b591751d4103c75cb1ab8e321379e149556b7f0bc97acc5 WatchSource:0}: Error finding container c9c8b1f9dc53f2a48b591751d4103c75cb1ab8e321379e149556b7f0bc97acc5: Status 404 returned error can't find the container with id c9c8b1f9dc53f2a48b591751d4103c75cb1ab8e321379e149556b7f0bc97acc5 Nov 22 07:30:36 crc kubenswrapper[4929]: E1122 07:30:36.077908 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fbqcz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-6dd8864d7c-62kf4_openstack-operators(ccb9635f-f142-4835-99be-509914bbeb1c): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 07:30:36 crc kubenswrapper[4929]: I1122 07:30:36.665397 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-4ssf8" event={"ID":"bf7b0c38-b9e7-4036-a9c0-9494b65c8714","Type":"ContainerStarted","Data":"91ed949c021987ce162f084e900fe3d948a0e1bdae4a3fc3329bb0e3cdb1160d"} Nov 22 07:30:36 crc kubenswrapper[4929]: I1122 07:30:36.667188 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-xh4kd" event={"ID":"027a936c-fba7-49af-b9b7-e849b356c87c","Type":"ContainerStarted","Data":"6705ff675e99aa887053f7204890072e44214bee9a1b798bdf398ea0b3737cf0"} Nov 22 07:30:36 crc kubenswrapper[4929]: I1122 07:30:36.668660 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-m7z2n" event={"ID":"9edcc36b-ecc0-4d9a-b549-e370b4a5e018","Type":"ContainerStarted","Data":"5f2eb0d25df18fcc8c19df5c8c6b1cc9637c6dd7a6e59d17c9aa76f07dfb8309"} Nov 22 07:30:36 crc kubenswrapper[4929]: I1122 07:30:36.670018 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-b4c496f69-g64vx" event={"ID":"91edab4b-0002-4a85-9ab9-9475a07e0be4","Type":"ContainerStarted","Data":"358b13a41608ccee9b4721bcaa2ebe7157c7fe0a7df85a6cda60eeed1d32a40d"} Nov 22 07:30:36 crc kubenswrapper[4929]: I1122 07:30:36.671639 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh" event={"ID":"62c58af8-c121-43e3-b91c-524e2d2783bd","Type":"ContainerStarted","Data":"83d63b32bcfeb9cfe18adf5ad12f8d43efdb564787e468f82db1d241b73dabb8"} Nov 22 07:30:36 crc kubenswrapper[4929]: I1122 07:30:36.671677 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh" event={"ID":"62c58af8-c121-43e3-b91c-524e2d2783bd","Type":"ContainerStarted","Data":"f55cb4fa3bc98fcab1cfb5b2e5960898abcd0fdca9a388e7d13f0690d332d35e"} Nov 22 07:30:36 crc kubenswrapper[4929]: I1122 07:30:36.672911 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-djv62" event={"ID":"4683d9d2-95c8-4fa3-a710-9b03cce32d1f","Type":"ContainerStarted","Data":"0e89efec08d6f70957858c2e676def3548322840eff3c1b427d23a77b7c41853"} Nov 22 07:30:36 crc kubenswrapper[4929]: I1122 07:30:36.674411 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4" event={"ID":"ccb9635f-f142-4835-99be-509914bbeb1c","Type":"ContainerStarted","Data":"a47fe30d704be18292eff35b9eeb531b1e785b823db6240b723fb6146f70cef9"} Nov 22 07:30:36 crc kubenswrapper[4929]: I1122 07:30:36.676861 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" event={"ID":"d9191fcc-97a0-4e23-bc96-89dfbd474f25","Type":"ContainerStarted","Data":"837ccebb578904be6fa65c8d76bd1e9e1d0a19b4a0018173614fe176211df9e3"} Nov 22 07:30:36 crc kubenswrapper[4929]: I1122 07:30:36.703400 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6576b65d54-nl6qn" event={"ID":"0bc28b5a-7c1e-4252-828b-92057d3d6749","Type":"ContainerStarted","Data":"c9c8b1f9dc53f2a48b591751d4103c75cb1ab8e321379e149556b7f0bc97acc5"} Nov 22 07:30:36 crc kubenswrapper[4929]: I1122 07:30:36.708062 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-2lnbz" event={"ID":"38070268-a2fc-4876-a8c2-81dcd0bc7f28","Type":"ContainerStarted","Data":"bf0ac9dc0f1727659c1b3f5a71e68994e761abc8e5a485183cae2041eb1cb152"} Nov 22 07:30:36 crc kubenswrapper[4929]: I1122 07:30:36.709422 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d656998f4-2trxf" event={"ID":"edea9302-eeed-48b3-a75f-170198cd1b2f","Type":"ContainerStarted","Data":"948039e20f27345f9bf04370f413c57476abaa3c37504fcafb03fe2901c80819"} Nov 22 07:30:36 crc kubenswrapper[4929]: I1122 07:30:36.710469 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-ckg8c" event={"ID":"5eb78153-a103-4885-b710-406198b25403","Type":"ContainerStarted","Data":"110b828a6aa23762fcf6088c4db24ae3f9a79624c2d2787bad448199c5f604a9"} Nov 22 07:30:36 crc kubenswrapper[4929]: I1122 07:30:36.713241 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-9tqpv" event={"ID":"80a127c3-bf01-4a74-8f00-9226fbb485cd","Type":"ContainerStarted","Data":"66af979df0f1992f466332321c07546d381bd3a504f778f5c5c6b3f2981cc4d3"} Nov 22 07:30:36 crc kubenswrapper[4929]: I1122 07:30:36.718162 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-wg5hx" event={"ID":"1cf9bdb5-5eec-4cf4-a7ef-021a2cc6e59f","Type":"ContainerStarted","Data":"9d0977ccd9e9002de35c75177077666eb43b0612ac4d4c602dee2cac5f29bbd2"} Nov 22 07:30:36 crc kubenswrapper[4929]: I1122 07:30:36.719284 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-zldhw" event={"ID":"3db96ba7-8db5-4603-bab2-bafe30ce4874","Type":"ContainerStarted","Data":"f4e568636e680336dfd337dd6e70e44f4d5e36d7ae40d553b2e135947d3a6a8e"} Nov 22 07:30:36 crc kubenswrapper[4929]: E1122 07:30:36.898872 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with 
ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh" podUID="62c58af8-c121-43e3-b91c-524e2d2783bd" Nov 22 07:30:36 crc kubenswrapper[4929]: E1122 07:30:36.921398 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" podUID="d9191fcc-97a0-4e23-bc96-89dfbd474f25" Nov 22 07:30:36 crc kubenswrapper[4929]: E1122 07:30:36.921967 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4" podUID="ccb9635f-f142-4835-99be-509914bbeb1c" Nov 22 07:30:37 crc kubenswrapper[4929]: E1122 07:30:37.103860 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-ckg8c" podUID="5eb78153-a103-4885-b710-406198b25403" Nov 22 07:30:37 crc kubenswrapper[4929]: I1122 07:30:37.752660 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4" event={"ID":"ccb9635f-f142-4835-99be-509914bbeb1c","Type":"ContainerStarted","Data":"8431faf7cb75c8c18ebeb807e47e6d961b019c0912c66cbc7cda00aaa0abfc33"} Nov 22 07:30:37 crc kubenswrapper[4929]: E1122 07:30:37.757400 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894\\\"\"" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4" podUID="ccb9635f-f142-4835-99be-509914bbeb1c" Nov 22 07:30:37 crc kubenswrapper[4929]: I1122 07:30:37.759691 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6576b65d54-nl6qn" event={"ID":"0bc28b5a-7c1e-4252-828b-92057d3d6749","Type":"ContainerStarted","Data":"9d5cc733a8ecfa979b9a8a7e963a558b9923cffa5fc39c36c7ba8e079d8807e2"} Nov 22 07:30:37 crc kubenswrapper[4929]: I1122 07:30:37.759733 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6576b65d54-nl6qn" event={"ID":"0bc28b5a-7c1e-4252-828b-92057d3d6749","Type":"ContainerStarted","Data":"ec990f473a6ffa4690d1ed9ca5a9a9534832891c8eee12a4f5d6b6e64b0e2e83"} Nov 22 07:30:37 crc kubenswrapper[4929]: I1122 07:30:37.760602 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-6576b65d54-nl6qn" Nov 22 07:30:37 crc kubenswrapper[4929]: I1122 07:30:37.780059 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-ckg8c" event={"ID":"5eb78153-a103-4885-b710-406198b25403","Type":"ContainerStarted","Data":"229e06b2586ff683665bdc3a6be9ba12268a9d8baea781e22d2d33c752fdf988"} Nov 22 07:30:37 crc kubenswrapper[4929]: E1122 07:30:37.782283 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\"" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-ckg8c" podUID="5eb78153-a103-4885-b710-406198b25403" Nov 22 07:30:37 crc kubenswrapper[4929]: I1122 07:30:37.784069 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" event={"ID":"d9191fcc-97a0-4e23-bc96-89dfbd474f25","Type":"ContainerStarted","Data":"029e86153fb0df74aaec1b790812f621a5772b7a47a145cb98a4ccd2affb18de"} Nov 22 07:30:37 crc kubenswrapper[4929]: E1122 07:30:37.785869 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.196:5001/openstack-k8s-operators/watcher-operator:05df33742087a9ad8e8dd92626b1730a09013b29\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh" podUID="62c58af8-c121-43e3-b91c-524e2d2783bd" Nov 22 07:30:37 crc kubenswrapper[4929]: E1122 07:30:37.785970 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" podUID="d9191fcc-97a0-4e23-bc96-89dfbd474f25" Nov 22 07:30:37 crc kubenswrapper[4929]: I1122 07:30:37.825986 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-6576b65d54-nl6qn" podStartSLOduration=4.8259659 podStartE2EDuration="4.8259659s" podCreationTimestamp="2025-11-22 07:30:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:30:37.82083805 +0000 UTC m=+1174.930292063" watchObservedRunningTime="2025-11-22 07:30:37.8259659 +0000 UTC m=+1174.935419913" Nov 22 07:30:38 crc kubenswrapper[4929]: E1122 07:30:38.790755 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\"" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-ckg8c" podUID="5eb78153-a103-4885-b710-406198b25403" Nov 22 07:30:38 crc kubenswrapper[4929]: E1122 07:30:38.790979 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" podUID="d9191fcc-97a0-4e23-bc96-89dfbd474f25" Nov 22 07:30:38 crc kubenswrapper[4929]: E1122 07:30:38.792136 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894\\\"\"" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4" 
podUID="ccb9635f-f142-4835-99be-509914bbeb1c" Nov 22 07:30:46 crc kubenswrapper[4929]: I1122 07:30:44.913602 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-6576b65d54-nl6qn" Nov 22 07:30:48 crc kubenswrapper[4929]: E1122 07:30:48.817246 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:207578cb433471cc1a79c21a808c8a15489d1d3c9fa77e29f3f697c33917fec6" Nov 22 07:30:48 crc kubenswrapper[4929]: E1122 07:30:48.817960 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:207578cb433471cc1a79c21a808c8a15489d1d3c9fa77e29f3f697c33917fec6,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-87ng9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-78bd47f458-knndh_openstack-operators(e8422098-8e21-47a4-9a93-9e3eed95dde7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:30:50 crc kubenswrapper[4929]: E1122 07:30:50.256204 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:70cce55bcf89468c5d468ca2fc317bfc3dc5f2bef1c502df9faca2eb1293ede7" Nov 22 07:30:50 crc kubenswrapper[4929]: E1122 07:30:50.256463 4929 kuberuntime_manager.go:1274] "Unhandled 
Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:70cce55bcf89468c5d468ca2fc317bfc3dc5f2bef1c502df9faca2eb1293ede7,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-p4pp9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-75fb479bcc-hlxc8_openstack-operators(aa0e6902-572d-44e6-9a5b-efbbbf188e01): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:30:52 crc kubenswrapper[4929]: E1122 07:30:52.008455 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0" Nov 22 07:30:52 crc kubenswrapper[4929]: E1122 07:30:52.009011 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} 
BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kl9v7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d656998f4-2trxf_openstack-operators(edea9302-eeed-48b3-a75f-170198cd1b2f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:30:54 crc kubenswrapper[4929]: E1122 07:30:54.072677 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:d38faa9070da05487afdaa9e261ad39274c2ed862daf42efa460a040431f1991" Nov 22 07:30:54 crc kubenswrapper[4929]: E1122 07:30:54.072953 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:d38faa9070da05487afdaa9e261ad39274c2ed862daf42efa460a040431f1991,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8tn4b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-7969689c84-b2l86_openstack-operators(f0d8223c-7551-48b6-8a56-fd43fb266534): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:31:05 crc kubenswrapper[4929]: E1122 07:31:05.279696 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:b749a5dd8bc718875c3f5e81b38d54d003be77ab92de4a3e9f9595566496a58a" Nov 22 07:31:05 crc kubenswrapper[4929]: E1122 07:31:05.281141 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:b749a5dd8bc718875c3f5e81b38d54d003be77ab92de4a3e9f9595566496a58a,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jb5pm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-58f887965d-9lr2t_openstack-operators(3713fa6e-3e99-41b8-bfff-22c91a622841): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:31:06 crc kubenswrapper[4929]: E1122 07:31:06.605852 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:7b90521b9e9cb4eb43c2f1c3bf85dbd068d684315f4f705b07708dd078df9d04" Nov 22 07:31:06 crc kubenswrapper[4929]: E1122 07:31:06.606043 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:7b90521b9e9cb4eb43c2f1c3bf85dbd068d684315f4f705b07708dd078df9d04,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-stx8k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-54b5986bb8-dzhvl_openstack-operators(696093b1-6e6c-4f6d-915b-f1b5481834a5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:31:06 crc kubenswrapper[4929]: E1122 07:31:06.661325 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/cinder-operator@sha256:553b1288b330ad05771d59c6b73c1681c95f457e8475682f9ad0d2e6b85f37e9" Nov 22 07:31:06 crc kubenswrapper[4929]: E1122 07:31:06.661517 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/cinder-operator@sha256:553b1288b330ad05771d59c6b73c1681c95f457e8475682f9ad0d2e6b85f37e9,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9wtwg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-6498cbf48f-h7h67_openstack-operators(f90b6c4a-a83b-48df-95ab-0240fee75881): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:31:06 crc kubenswrapper[4929]: E1122 07:31:06.990986 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = reading blob sha256:2cdb997f7426cf116c3e87ebbf05976ec80f04e5efa98a2cb07d089d3980849b: Get \"http://38.102.83.196:5001/v2/openstack-k8s-operators/watcher-operator/blobs/sha256:2cdb997f7426cf116c3e87ebbf05976ec80f04e5efa98a2cb07d089d3980849b\": context canceled" image="38.102.83.196:5001/openstack-k8s-operators/watcher-operator:05df33742087a9ad8e8dd92626b1730a09013b29" Nov 22 07:31:06 crc kubenswrapper[4929]: E1122 07:31:06.991040 4929 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = reading blob sha256:2cdb997f7426cf116c3e87ebbf05976ec80f04e5efa98a2cb07d089d3980849b: Get \"http://38.102.83.196:5001/v2/openstack-k8s-operators/watcher-operator/blobs/sha256:2cdb997f7426cf116c3e87ebbf05976ec80f04e5efa98a2cb07d089d3980849b\": context canceled" image="38.102.83.196:5001/openstack-k8s-operators/watcher-operator:05df33742087a9ad8e8dd92626b1730a09013b29" Nov 22 07:31:06 crc kubenswrapper[4929]: E1122 07:31:06.991409 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.196:5001/openstack-k8s-operators/watcher-operator:05df33742087a9ad8e8dd92626b1730a09013b29,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cwck2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-747b777d6b-wwnbh_openstack-operators(62c58af8-c121-43e3-b91c-524e2d2783bd): ErrImagePull: rpc error: code = Canceled desc = reading blob sha256:2cdb997f7426cf116c3e87ebbf05976ec80f04e5efa98a2cb07d089d3980849b: Get \"http://38.102.83.196:5001/v2/openstack-k8s-operators/watcher-operator/blobs/sha256:2cdb997f7426cf116c3e87ebbf05976ec80f04e5efa98a2cb07d089d3980849b\": context canceled" logger="UnhandledError" Nov 22 07:31:06 crc kubenswrapper[4929]: E1122 07:31:06.993287 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = reading blob sha256:2cdb997f7426cf116c3e87ebbf05976ec80f04e5efa98a2cb07d089d3980849b: Get \\\"http://38.102.83.196:5001/v2/openstack-k8s-operators/watcher-operator/blobs/sha256:2cdb997f7426cf116c3e87ebbf05976ec80f04e5efa98a2cb07d089d3980849b\\\": context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh" podUID="62c58af8-c121-43e3-b91c-524e2d2783bd" Nov 22 07:31:07 crc kubenswrapper[4929]: E1122 07:31:07.203074 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/telemetry-operator@sha256:5324a6d2f76fc3041023b0cbd09a733ef2b59f310d390e4d6483d219eb96494f" Nov 22 07:31:07 crc kubenswrapper[4929]: E1122 07:31:07.203278 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:5324a6d2f76fc3041023b0cbd09a733ef2b59f310d390e4d6483d219eb96494f,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pmrjh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-6d4bf84b58-xh4kd_openstack-operators(027a936c-fba7-49af-b9b7-e849b356c87c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:31:08 crc kubenswrapper[4929]: E1122 07:31:08.054012 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d" Nov 22 07:31:08 crc kubenswrapper[4929]: E1122 07:31:08.054737 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gkql9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-b4c496f69-g64vx_openstack-operators(91edab4b-0002-4a85-9ab9-9475a07e0be4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:31:08 crc kubenswrapper[4929]: E1122 07:31:08.864554 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:c6405d94e56b40ef669729216ab4b9c441f34bb280902efa2940038c076b560f" Nov 22 07:31:08 crc kubenswrapper[4929]: E1122 07:31:08.864768 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:c6405d94e56b40ef669729216ab4b9c441f34bb280902efa2940038c076b560f,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ln4js,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-767ccfd65f-zldhw_openstack-operators(3db96ba7-8db5-4603-bab2-bafe30ce4874): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:31:09 crc kubenswrapper[4929]: E1122 07:31:09.478902 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:848f4c43c6bdd4e33e3ce1d147a85b9b6a6124a150bd5155dce421ef539259e9" Nov 22 07:31:09 crc kubenswrapper[4929]: E1122 07:31:09.479120 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:848f4c43c6bdd4e33e3ce1d147a85b9b6a6124a150bd5155dce421ef539259e9,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-b8cgq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-598f69df5d-9tqpv_openstack-operators(80a127c3-bf01-4a74-8f00-9226fbb485cd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:31:11 crc kubenswrapper[4929]: E1122 07:31:11.915841 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13" Nov 22 07:31:11 crc kubenswrapper[4929]: E1122 07:31:11.916452 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dnlnz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-54cfbf4c7d-4ssf8_openstack-operators(bf7b0c38-b9e7-4036-a9c0-9494b65c8714): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:31:12 crc kubenswrapper[4929]: E1122 07:31:12.521027 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd" Nov 22 07:31:12 crc kubenswrapper[4929]: E1122 07:31:12.521700 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEIL
OMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Na
me:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANI
LA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMA
GE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-z9gz8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz_openstack-operators(d9191fcc-97a0-4e23-bc96-89dfbd474f25): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:31:12 crc kubenswrapper[4929]: E1122 07:31:12.523463 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" podUID="d9191fcc-97a0-4e23-bc96-89dfbd474f25" Nov 22 07:31:13 crc kubenswrapper[4929]: E1122 07:31:13.335961 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7" Nov 22 07:31:13 crc kubenswrapper[4929]: E1122 07:31:13.337359 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ftgxz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-cfbb9c588-ckg8c_openstack-operators(5eb78153-a103-4885-b710-406198b25403): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:31:13 crc kubenswrapper[4929]: E1122 07:31:13.339176 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-ckg8c" podUID="5eb78153-a103-4885-b710-406198b25403" Nov 22 07:31:16 crc kubenswrapper[4929]: E1122 07:31:16.587540 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/test-operator-controller-manager-b4c496f69-g64vx" podUID="91edab4b-0002-4a85-9ab9-9475a07e0be4" Nov 22 07:31:16 crc kubenswrapper[4929]: E1122 07:31:16.607549 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-dzhvl" podUID="696093b1-6e6c-4f6d-915b-f1b5481834a5" Nov 22 07:31:16 crc kubenswrapper[4929]: E1122 07:31:16.607837 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-zldhw" podUID="3db96ba7-8db5-4603-bab2-bafe30ce4874" Nov 22 07:31:16 crc kubenswrapper[4929]: E1122 07:31:16.607914 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-xh4kd" podUID="027a936c-fba7-49af-b9b7-e849b356c87c" Nov 22 07:31:16 crc kubenswrapper[4929]: E1122 07:31:16.608339 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-hlxc8" podUID="aa0e6902-572d-44e6-9a5b-efbbbf188e01" Nov 22 07:31:16 crc kubenswrapper[4929]: E1122 07:31:16.619557 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Nov 22 07:31:16 crc kubenswrapper[4929]: E1122 07:31:16.619751 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-t4r99,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-5f97d8c699-djv62_openstack-operators(4683d9d2-95c8-4fa3-a710-9b03cce32d1f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:31:16 crc kubenswrapper[4929]: E1122 07:31:16.620901 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-djv62" podUID="4683d9d2-95c8-4fa3-a710-9b03cce32d1f" Nov 22 07:31:16 crc kubenswrapper[4929]: E1122 07:31:16.666027 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h7h67" podUID="f90b6c4a-a83b-48df-95ab-0240fee75881" Nov 22 07:31:16 crc kubenswrapper[4929]: E1122 07:31:16.666528 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-d656998f4-2trxf" podUID="edea9302-eeed-48b3-a75f-170198cd1b2f" Nov 22 07:31:16 crc kubenswrapper[4929]: E1122 07:31:16.666673 4929 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-58f887965d-9lr2t" podUID="3713fa6e-3e99-41b8-bfff-22c91a622841" Nov 22 07:31:16 crc kubenswrapper[4929]: E1122 07:31:16.669683 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-9tqpv" podUID="80a127c3-bf01-4a74-8f00-9226fbb485cd" Nov 22 07:31:16 crc kubenswrapper[4929]: E1122 07:31:16.670054 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-7969689c84-b2l86" podUID="f0d8223c-7551-48b6-8a56-fd43fb266534" Nov 22 07:31:16 crc kubenswrapper[4929]: E1122 07:31:16.671532 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-4ssf8" podUID="bf7b0c38-b9e7-4036-a9c0-9494b65c8714" Nov 22 07:31:16 crc kubenswrapper[4929]: E1122 07:31:16.671842 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-knndh" podUID="e8422098-8e21-47a4-9a93-9e3eed95dde7" Nov 22 07:31:17 crc kubenswrapper[4929]: I1122 07:31:17.096479 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7969689c84-b2l86" event={"ID":"f0d8223c-7551-48b6-8a56-fd43fb266534","Type":"ContainerStarted","Data":"1c9ae5ee82309041ae8db735d46072455fb0590082e8b204424fcd978dc8d238"} Nov 22 07:31:17 crc kubenswrapper[4929]: I1122 07:31:17.109809 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d656998f4-2trxf" event={"ID":"edea9302-eeed-48b3-a75f-170198cd1b2f","Type":"ContainerStarted","Data":"75bcff2916eec323f974e8c2bcb858c2d344896e9e1d2738b72e18dcd3efb54b"} Nov 22 07:31:17 crc kubenswrapper[4929]: I1122 07:31:17.114540 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-m7z2n" event={"ID":"9edcc36b-ecc0-4d9a-b549-e370b4a5e018","Type":"ContainerStarted","Data":"412f73f3a3493eabc469a2b6c63ea6b3f13b9a1a0eda05e98d7691ccb917f6ae"} Nov 22 07:31:17 crc kubenswrapper[4929]: I1122 07:31:17.128720 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-4ssf8" event={"ID":"bf7b0c38-b9e7-4036-a9c0-9494b65c8714","Type":"ContainerStarted","Data":"fe94a13a631c6c6ae6e6c97ef59104f6b58304e0df37479e166f804177c3bf21"} Nov 22 07:31:17 crc kubenswrapper[4929]: I1122 07:31:17.145731 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-rrdbn" 
event={"ID":"79972b49-f1f3-482e-88ac-63f5e249d537","Type":"ContainerStarted","Data":"610cb581a6aef8cde79337ed29f1000088d097b97a88b9deb1a34ed2788fd2b2"} Nov 22 07:31:17 crc kubenswrapper[4929]: E1122 07:31:17.146853 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-4ssf8" podUID="bf7b0c38-b9e7-4036-a9c0-9494b65c8714" Nov 22 07:31:17 crc kubenswrapper[4929]: I1122 07:31:17.157949 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-q5bvk" event={"ID":"7ec24093-c914-475e-86b9-2c4ba96791ff","Type":"ContainerStarted","Data":"74ba038a44a584664491609a4915da809031c73b4ddbd42fafdfa02f12d67f56"} Nov 22 07:31:17 crc kubenswrapper[4929]: I1122 07:31:17.162677 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-2lnbz" event={"ID":"38070268-a2fc-4876-a8c2-81dcd0bc7f28","Type":"ContainerStarted","Data":"119feda88fe0b423bf8f97ece0c950f19efc9beb167af91a514791e0031d295f"} Nov 22 07:31:17 crc kubenswrapper[4929]: I1122 07:31:17.174975 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-hlxc8" event={"ID":"aa0e6902-572d-44e6-9a5b-efbbbf188e01","Type":"ContainerStarted","Data":"1319494670be43cb8e59082093d3a94d298a896986cd64cdc801bef4613bdce1"} Nov 22 07:31:17 crc kubenswrapper[4929]: I1122 07:31:17.206910 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-knndh" event={"ID":"e8422098-8e21-47a4-9a93-9e3eed95dde7","Type":"ContainerStarted","Data":"c915c2276d1b0e2e2e6266444c2334aa802af8ce053c62df6dc907d85b8b0894"} Nov 22 07:31:17 crc kubenswrapper[4929]: I1122 07:31:17.221272 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58f887965d-9lr2t" event={"ID":"3713fa6e-3e99-41b8-bfff-22c91a622841","Type":"ContainerStarted","Data":"75b68b72ba132584d8309d610bae44331dee61fb6bbf053d456007485008bac0"} Nov 22 07:31:17 crc kubenswrapper[4929]: I1122 07:31:17.227803 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-wg5hx" event={"ID":"1cf9bdb5-5eec-4cf4-a7ef-021a2cc6e59f","Type":"ContainerStarted","Data":"a7c2aacc17dd6ba2d85937db265d4575fd89dfd0d4be6cf95806fbcbb1c239f1"} Nov 22 07:31:17 crc kubenswrapper[4929]: I1122 07:31:17.243613 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-zldhw" event={"ID":"3db96ba7-8db5-4603-bab2-bafe30ce4874","Type":"ContainerStarted","Data":"14c2c15a65d55d04161f25b4d974d0f565eee39f9995267dc0fd0b2e152867d6"} Nov 22 07:31:17 crc kubenswrapper[4929]: E1122 07:31:17.245056 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/designate-operator@sha256:c6405d94e56b40ef669729216ab4b9c441f34bb280902efa2940038c076b560f\\\"\"" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-zldhw" podUID="3db96ba7-8db5-4603-bab2-bafe30ce4874" Nov 22 07:31:17 
crc kubenswrapper[4929]: I1122 07:31:17.246282 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h7h67" event={"ID":"f90b6c4a-a83b-48df-95ab-0240fee75881","Type":"ContainerStarted","Data":"eb42846145a17ea1089b9474098f50a09febbc262916f321d0c175b6fcc4f164"} Nov 22 07:31:17 crc kubenswrapper[4929]: I1122 07:31:17.277151 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-xh4kd" event={"ID":"027a936c-fba7-49af-b9b7-e849b356c87c","Type":"ContainerStarted","Data":"4c629b49553f75e1870751d6fc5dc03f9ad0c4dd65b23e08200bd60156a495d2"} Nov 22 07:31:17 crc kubenswrapper[4929]: I1122 07:31:17.296560 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-dzhvl" event={"ID":"696093b1-6e6c-4f6d-915b-f1b5481834a5","Type":"ContainerStarted","Data":"45c1840e01a3d539a8f7b27c9d187285ee2a990c2576606ed6d8718746d8a392"} Nov 22 07:31:17 crc kubenswrapper[4929]: I1122 07:31:17.313903 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-9tqpv" event={"ID":"80a127c3-bf01-4a74-8f00-9226fbb485cd","Type":"ContainerStarted","Data":"1024c782187330c5e9f729c6d87e560a1521141e59a7f291d81290017d0698bc"} Nov 22 07:31:17 crc kubenswrapper[4929]: E1122 07:31:17.330156 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:848f4c43c6bdd4e33e3ce1d147a85b9b6a6124a150bd5155dce421ef539259e9\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-9tqpv" podUID="80a127c3-bf01-4a74-8f00-9226fbb485cd" Nov 22 07:31:17 crc kubenswrapper[4929]: I1122 07:31:17.342361 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-b4c496f69-g64vx" event={"ID":"91edab4b-0002-4a85-9ab9-9475a07e0be4","Type":"ContainerStarted","Data":"239677fe334a3f24743c2c22befa81ec41effc2144bfaf60e1691ae22d79c511"} Nov 22 07:31:17 crc kubenswrapper[4929]: E1122 07:31:17.342680 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d\\\"\"" pod="openstack-operators/test-operator-controller-manager-b4c496f69-g64vx" podUID="91edab4b-0002-4a85-9ab9-9475a07e0be4" Nov 22 07:31:17 crc kubenswrapper[4929]: E1122 07:31:17.344441 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-djv62" podUID="4683d9d2-95c8-4fa3-a710-9b03cce32d1f" Nov 22 07:31:18 crc kubenswrapper[4929]: I1122 07:31:18.348437 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-q5bvk" event={"ID":"7ec24093-c914-475e-86b9-2c4ba96791ff","Type":"ContainerStarted","Data":"5a52140af74b8b9411b38dd8548d5f41d01fd4ab021a19dd030e118e3667cdca"} Nov 22 07:31:18 crc kubenswrapper[4929]: I1122 
07:31:18.349770 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-wg5hx" event={"ID":"1cf9bdb5-5eec-4cf4-a7ef-021a2cc6e59f","Type":"ContainerStarted","Data":"1c644a8109ab3d45af9a0bd56c1a429e19020aa77bfa0abbe957ebf9b88d4012"}
Nov 22 07:31:18 crc kubenswrapper[4929]: I1122 07:31:18.351231 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-2lnbz" event={"ID":"38070268-a2fc-4876-a8c2-81dcd0bc7f28","Type":"ContainerStarted","Data":"518b49ff9e9204eb0f1b960427705c24b412f7e41d3620906445a62f539e0df8"}
Nov 22 07:31:18 crc kubenswrapper[4929]: I1122 07:31:18.354037 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-rrdbn" event={"ID":"79972b49-f1f3-482e-88ac-63f5e249d537","Type":"ContainerStarted","Data":"1b59d03eda0333ff48c4c7cead81ac15beac7787e5015ae855dc3253303998e2"}
Nov 22 07:31:18 crc kubenswrapper[4929]: I1122 07:31:18.356122 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4" event={"ID":"ccb9635f-f142-4835-99be-509914bbeb1c","Type":"ContainerStarted","Data":"1a72e96466b41928eae8144540cde047951b6d59450a9dd4e963401d5b8ddea5"}
Nov 22 07:31:18 crc kubenswrapper[4929]: E1122 07:31:18.357385 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:848f4c43c6bdd4e33e3ce1d147a85b9b6a6124a150bd5155dce421ef539259e9\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-9tqpv" podUID="80a127c3-bf01-4a74-8f00-9226fbb485cd"
Nov 22 07:31:18 crc kubenswrapper[4929]: E1122 07:31:18.358436 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-4ssf8" podUID="bf7b0c38-b9e7-4036-a9c0-9494b65c8714"
Nov 22 07:31:18 crc kubenswrapper[4929]: E1122 07:31:18.358701 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/designate-operator@sha256:c6405d94e56b40ef669729216ab4b9c441f34bb280902efa2940038c076b560f\\\"\"" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-zldhw" podUID="3db96ba7-8db5-4603-bab2-bafe30ce4874"
Nov 22 07:31:18 crc kubenswrapper[4929]: I1122 07:31:18.594126 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 07:31:18 crc kubenswrapper[4929]: I1122 07:31:18.594245 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 07:31:19 crc kubenswrapper[4929]: I1122 07:31:19.377118 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-m7z2n" event={"ID":"9edcc36b-ecc0-4d9a-b549-e370b4a5e018","Type":"ContainerStarted","Data":"d0ddc63411363e49f02d7664585cfc52aa8602c36ccdcb70e794a1f73486e511"}
Nov 22 07:31:19 crc kubenswrapper[4929]: I1122 07:31:19.377515 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-q5bvk"
Nov 22 07:31:19 crc kubenswrapper[4929]: I1122 07:31:19.377543 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-2lnbz"
Nov 22 07:31:19 crc kubenswrapper[4929]: I1122 07:31:19.377563 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-rrdbn"
Nov 22 07:31:19 crc kubenswrapper[4929]: I1122 07:31:19.377582 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4"
Nov 22 07:31:19 crc kubenswrapper[4929]: I1122 07:31:19.402559 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-rrdbn" podStartSLOduration=30.060215539 podStartE2EDuration="47.402500748s" podCreationTimestamp="2025-11-22 07:30:32 +0000 UTC" firstStartedPulling="2025-11-22 07:30:34.518440813 +0000 UTC m=+1171.627894826" lastFinishedPulling="2025-11-22 07:30:51.860726022 +0000 UTC m=+1188.970180035" observedRunningTime="2025-11-22 07:31:19.401577655 +0000 UTC m=+1216.511031738" watchObservedRunningTime="2025-11-22 07:31:19.402500748 +0000 UTC m=+1216.511954801"
Nov 22 07:31:19 crc kubenswrapper[4929]: I1122 07:31:19.430648 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-2lnbz" podStartSLOduration=9.681320722 podStartE2EDuration="46.430618157s" podCreationTimestamp="2025-11-22 07:30:33 +0000 UTC" firstStartedPulling="2025-11-22 07:30:35.757750134 +0000 UTC m=+1172.867204147" lastFinishedPulling="2025-11-22 07:31:12.507047539 +0000 UTC m=+1209.616501582" observedRunningTime="2025-11-22 07:31:19.420467051 +0000 UTC m=+1216.529921104" watchObservedRunningTime="2025-11-22 07:31:19.430618157 +0000 UTC m=+1216.540072180"
Nov 22 07:31:19 crc kubenswrapper[4929]: I1122 07:31:19.446007 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-q5bvk" podStartSLOduration=14.142338962 podStartE2EDuration="47.445990045s" podCreationTimestamp="2025-11-22 07:30:32 +0000 UTC" firstStartedPulling="2025-11-22 07:30:34.378265496 +0000 UTC m=+1171.487719509" lastFinishedPulling="2025-11-22 07:31:07.681916559 +0000 UTC m=+1204.791370592" observedRunningTime="2025-11-22 07:31:19.438703591 +0000 UTC m=+1216.548157654" watchObservedRunningTime="2025-11-22 07:31:19.445990045 +0000 UTC m=+1216.555444058"
Nov 22 07:31:19 crc kubenswrapper[4929]: I1122 07:31:19.464904 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4" podStartSLOduration=6.57896002 podStartE2EDuration="47.464884732s" podCreationTimestamp="2025-11-22 07:30:32 +0000 UTC" firstStartedPulling="2025-11-22 07:30:36.077700227 +0000 UTC m=+1173.187154240" lastFinishedPulling="2025-11-22 07:31:16.963624939 +0000 UTC m=+1214.073078952" observedRunningTime="2025-11-22 07:31:19.46084294 +0000 UTC m=+1216.570296963" watchObservedRunningTime="2025-11-22 07:31:19.464884732 +0000 UTC m=+1216.574338745"
Nov 22 07:31:19 crc kubenswrapper[4929]: I1122 07:31:19.486856 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-wg5hx" podStartSLOduration=7.756111132 podStartE2EDuration="47.486831136s" podCreationTimestamp="2025-11-22 07:30:32 +0000 UTC" firstStartedPulling="2025-11-22 07:30:35.766053203 +0000 UTC m=+1172.875507216" lastFinishedPulling="2025-11-22 07:31:15.496773177 +0000 UTC m=+1212.606227220" observedRunningTime="2025-11-22 07:31:19.480826494 +0000 UTC m=+1216.590280557" watchObservedRunningTime="2025-11-22 07:31:19.486831136 +0000 UTC m=+1216.596285169"
Nov 22 07:31:19 crc kubenswrapper[4929]: I1122 07:31:19.508079 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-m7z2n" podStartSLOduration=12.904439698000001 podStartE2EDuration="46.508052781s" podCreationTimestamp="2025-11-22 07:30:33 +0000 UTC" firstStartedPulling="2025-11-22 07:30:35.775879071 +0000 UTC m=+1172.885333084" lastFinishedPulling="2025-11-22 07:31:09.379492144 +0000 UTC m=+1206.488946167" observedRunningTime="2025-11-22 07:31:19.499054134 +0000 UTC m=+1216.608508177" watchObservedRunningTime="2025-11-22 07:31:19.508052781 +0000 UTC m=+1216.617506834"
Nov 22 07:31:20 crc kubenswrapper[4929]: I1122 07:31:20.384931 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-m7z2n"
Nov 22 07:31:21 crc kubenswrapper[4929]: I1122 07:31:21.395631 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-m7z2n"
Nov 22 07:31:23 crc kubenswrapper[4929]: I1122 07:31:23.316886 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-q5bvk"
Nov 22 07:31:23 crc kubenswrapper[4929]: I1122 07:31:23.342723 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-rrdbn"
Nov 22 07:31:23 crc kubenswrapper[4929]: I1122 07:31:23.525718 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-wg5hx"
Nov 22 07:31:23 crc kubenswrapper[4929]: I1122 07:31:23.528396 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-56f54d6746-wg5hx"
Nov 22 07:31:23 crc kubenswrapper[4929]: I1122 07:31:23.767192 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-54fc5f65b7-2lnbz"
Nov 22 07:31:23 crc kubenswrapper[4929]: E1122 07:31:23.835558 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.196:5001/openstack-k8s-operators/watcher-operator:05df33742087a9ad8e8dd92626b1730a09013b29\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh" podUID="62c58af8-c121-43e3-b91c-524e2d2783bd"
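Read straight, the pod_startup_latency_tracker entries above encode one relationship: podStartE2EDuration is observedRunningTime minus podCreationTimestamp, and podStartSLOduration is that same interval with the image-pull window (lastFinishedPulling minus firstStartedPulling) subtracted, since pull time does not count against the startup SLI. A minimal Go sketch checking this against the keystone-operator entry above (timestamps copied from the log; the parse layout is this sketch's own assumption):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	created, _ := time.Parse(layout, "2025-11-22 07:30:32 +0000 UTC")
	firstPull, _ := time.Parse(layout, "2025-11-22 07:30:34.518440813 +0000 UTC")
	lastPull, _ := time.Parse(layout, "2025-11-22 07:30:51.860726022 +0000 UTC")
	running, _ := time.Parse(layout, "2025-11-22 07:31:19.402500748 +0000 UTC")

	e2e := running.Sub(created)          // 47.402500748s == podStartE2EDuration
	slo := e2e - lastPull.Sub(firstPull) // 30.060215539s == podStartSLOduration
	fmt.Println(e2e, slo)
}
```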
Nov 22 07:31:23 crc kubenswrapper[4929]: I1122 07:31:23.892687 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-6dd8864d7c-62kf4"
Nov 22 07:31:25 crc kubenswrapper[4929]: E1122 07:31:25.354425 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" podUID="d9191fcc-97a0-4e23-bc96-89dfbd474f25"
Nov 22 07:31:25 crc kubenswrapper[4929]: E1122 07:31:25.949121 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\"" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-ckg8c" podUID="5eb78153-a103-4885-b710-406198b25403"
Nov 22 07:31:27 crc kubenswrapper[4929]: I1122 07:31:27.672332 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58f887965d-9lr2t" event={"ID":"3713fa6e-3e99-41b8-bfff-22c91a622841","Type":"ContainerStarted","Data":"dc4bcc737f0c971db3e4656b17564355263c78cbb98950a64518f2f025f320fb"}
Nov 22 07:31:27 crc kubenswrapper[4929]: I1122 07:31:27.674892 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d656998f4-2trxf" event={"ID":"edea9302-eeed-48b3-a75f-170198cd1b2f","Type":"ContainerStarted","Data":"be6be314b09aafbde398121f8df94d3b61e14517cfaba176c6170d8e08242d25"}
Nov 22 07:31:29 crc kubenswrapper[4929]: I1122 07:31:29.704698 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-knndh" event={"ID":"e8422098-8e21-47a4-9a93-9e3eed95dde7","Type":"ContainerStarted","Data":"5de8d14e0e4d395657e7e8c1163b41a6cdf15608f458873bd91e62c7f339013e"}
Nov 22 07:31:29 crc kubenswrapper[4929]: I1122 07:31:29.715518 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-b4c496f69-g64vx" event={"ID":"91edab4b-0002-4a85-9ab9-9475a07e0be4","Type":"ContainerStarted","Data":"86f486c3248ae3a03f3dadda41971374e213a254c7ec4d7cb830108bca794b21"}
Nov 22 07:31:29 crc kubenswrapper[4929]: I1122 07:31:29.718294 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7969689c84-b2l86" event={"ID":"f0d8223c-7551-48b6-8a56-fd43fb266534","Type":"ContainerStarted","Data":"a60848001bc7bb1b3c0713784b629d0d0aa8c2d9c2afa848eac49d80bcdc1cb0"}
Nov 22 07:31:29 crc kubenswrapper[4929]: I1122 07:31:29.722161 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h7h67" event={"ID":"f90b6c4a-a83b-48df-95ab-0240fee75881","Type":"ContainerStarted","Data":"92c2c6cb4ec215d022eb64190292f641f4b23da63cf003db0f5b6241ada43d37"}
Nov 22 07:31:29 crc kubenswrapper[4929]: I1122 07:31:29.724058 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-xh4kd" event={"ID":"027a936c-fba7-49af-b9b7-e849b356c87c","Type":"ContainerStarted","Data":"dbd0aca8c5768adb77725f18a16b46894084ffdd56ed5ab48ea28a32f91f19b7"}
Nov 22 07:31:29 crc kubenswrapper[4929]: I1122 07:31:29.727647 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-hlxc8" event={"ID":"aa0e6902-572d-44e6-9a5b-efbbbf188e01","Type":"ContainerStarted","Data":"3c96890fa8828b4af21a1ba946ceed6502682f6cdb8fb5242aac0f64560e5f6b"}
Nov 22 07:31:29 crc kubenswrapper[4929]: I1122 07:31:29.730071 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-dzhvl" event={"ID":"696093b1-6e6c-4f6d-915b-f1b5481834a5","Type":"ContainerStarted","Data":"02b28694577a31d5062388c679ee5b1f22cabe553543d31c503992467a7309e4"}
Nov 22 07:31:30 crc kubenswrapper[4929]: I1122 07:31:30.737175 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d656998f4-2trxf"
Nov 22 07:31:30 crc kubenswrapper[4929]: I1122 07:31:30.737864 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58f887965d-9lr2t"
Nov 22 07:31:30 crc kubenswrapper[4929]: I1122 07:31:30.738627 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-dzhvl"
Nov 22 07:31:30 crc kubenswrapper[4929]: I1122 07:31:30.755341 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-58f887965d-9lr2t" podStartSLOduration=8.336659421 podStartE2EDuration="58.755320316s" podCreationTimestamp="2025-11-22 07:30:32 +0000 UTC" firstStartedPulling="2025-11-22 07:30:35.288688608 +0000 UTC m=+1172.398142621" lastFinishedPulling="2025-11-22 07:31:25.707349493 +0000 UTC m=+1222.816803516" observedRunningTime="2025-11-22 07:31:30.751971581 +0000 UTC m=+1227.861425594" watchObservedRunningTime="2025-11-22 07:31:30.755320316 +0000 UTC m=+1227.864774329"
Nov 22 07:31:30 crc kubenswrapper[4929]: I1122 07:31:30.783535 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-dzhvl" podStartSLOduration=8.382286713 podStartE2EDuration="58.783514317s" podCreationTimestamp="2025-11-22 07:30:32 +0000 UTC" firstStartedPulling="2025-11-22 07:30:35.305824761 +0000 UTC m=+1172.415278774" lastFinishedPulling="2025-11-22 07:31:25.707052345 +0000 UTC m=+1222.816506378" observedRunningTime="2025-11-22 07:31:30.769243137 +0000 UTC m=+1227.878697170" watchObservedRunningTime="2025-11-22 07:31:30.783514317 +0000 UTC m=+1227.892968330"
Nov 22 07:31:30 crc kubenswrapper[4929]: I1122 07:31:30.792661 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-d656998f4-2trxf" podStartSLOduration=8.415498493 podStartE2EDuration="57.792644508s" podCreationTimestamp="2025-11-22 07:30:33 +0000 UTC" firstStartedPulling="2025-11-22 07:30:35.802820491 +0000 UTC m=+1172.912274504" lastFinishedPulling="2025-11-22 07:31:25.179966506 +0000 UTC m=+1222.289420519" observedRunningTime="2025-11-22 07:31:30.787793745 +0000 UTC m=+1227.897247758" watchObservedRunningTime="2025-11-22 07:31:30.792644508 +0000 UTC m=+1227.902098521"
Nov 22 07:31:31 crc kubenswrapper[4929]: I1122 07:31:31.747404 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-knndh"
Nov 22 07:31:31 crc kubenswrapper[4929]: I1122 07:31:31.747795 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-xh4kd"
Nov 22 07:31:31 crc kubenswrapper[4929]: I1122 07:31:31.749034 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d656998f4-2trxf"
Nov 22 07:31:31 crc kubenswrapper[4929]: I1122 07:31:31.777370 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-knndh" podStartSLOduration=9.38051728 podStartE2EDuration="59.777345053s" podCreationTimestamp="2025-11-22 07:30:32 +0000 UTC" firstStartedPulling="2025-11-22 07:30:35.282719308 +0000 UTC m=+1172.392173321" lastFinishedPulling="2025-11-22 07:31:25.679547091 +0000 UTC m=+1222.789001094" observedRunningTime="2025-11-22 07:31:31.770156702 +0000 UTC m=+1228.879610755" watchObservedRunningTime="2025-11-22 07:31:31.777345053 +0000 UTC m=+1228.886799096"
Nov 22 07:31:31 crc kubenswrapper[4929]: I1122 07:31:31.843246 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-xh4kd" podStartSLOduration=9.089309804 podStartE2EDuration="58.843194005s" podCreationTimestamp="2025-11-22 07:30:33 +0000 UTC" firstStartedPulling="2025-11-22 07:30:35.956396166 +0000 UTC m=+1173.065850179" lastFinishedPulling="2025-11-22 07:31:25.710280357 +0000 UTC m=+1222.819734380" observedRunningTime="2025-11-22 07:31:31.828824132 +0000 UTC m=+1228.938278195" watchObservedRunningTime="2025-11-22 07:31:31.843194005 +0000 UTC m=+1228.952648028"
Nov 22 07:31:31 crc kubenswrapper[4929]: I1122 07:31:31.863076 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h7h67" podStartSLOduration=8.368856743 podStartE2EDuration="59.863059456s" podCreationTimestamp="2025-11-22 07:30:32 +0000 UTC" firstStartedPulling="2025-11-22 07:30:34.219698865 +0000 UTC m=+1171.329152868" lastFinishedPulling="2025-11-22 07:31:25.713901548 +0000 UTC m=+1222.823355581" observedRunningTime="2025-11-22 07:31:31.86082469 +0000 UTC m=+1228.970278743" watchObservedRunningTime="2025-11-22 07:31:31.863059456 +0000 UTC m=+1228.972513479"
Nov 22 07:31:31 crc kubenswrapper[4929]: I1122 07:31:31.885753 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-b4c496f69-g64vx" podStartSLOduration=9.189679887 podStartE2EDuration="58.885732038s" podCreationTimestamp="2025-11-22 07:30:33 +0000 UTC" firstStartedPulling="2025-11-22 07:30:35.963077295 +0000 UTC m=+1173.072531308" lastFinishedPulling="2025-11-22 07:31:25.659129446 +0000 UTC m=+1222.768583459" observedRunningTime="2025-11-22 07:31:31.881785309 +0000 UTC m=+1228.991239332" watchObservedRunningTime="2025-11-22 07:31:31.885732038 +0000 UTC m=+1228.995186061"
Nov 22 07:31:31 crc kubenswrapper[4929]: I1122 07:31:31.929638 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-7969689c84-b2l86" podStartSLOduration=9.589365489 podStartE2EDuration="59.929618236s" podCreationTimestamp="2025-11-22 07:30:32 +0000 UTC" firstStartedPulling="2025-11-22 07:30:35.318022708 +0000 UTC m=+1172.427476721" lastFinishedPulling="2025-11-22 07:31:25.658275455 +0000 UTC m=+1222.767729468" observedRunningTime="2025-11-22 07:31:31.923794599 +0000 UTC m=+1229.033248612" watchObservedRunningTime="2025-11-22 07:31:31.929618236 +0000 UTC m=+1229.039072249"
Nov 22 07:31:31 crc kubenswrapper[4929]: I1122 07:31:31.930036 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-hlxc8" podStartSLOduration=8.25304386 podStartE2EDuration="59.930030166s" podCreationTimestamp="2025-11-22 07:30:32 +0000 UTC" firstStartedPulling="2025-11-22 07:30:34.002369581 +0000 UTC m=+1171.111823594" lastFinishedPulling="2025-11-22 07:31:25.679355877 +0000 UTC m=+1222.788809900" observedRunningTime="2025-11-22 07:31:31.909894068 +0000 UTC m=+1229.019348101" watchObservedRunningTime="2025-11-22 07:31:31.930030166 +0000 UTC m=+1229.039484179"
Nov 22 07:31:33 crc kubenswrapper[4929]: I1122 07:31:33.150279 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-hlxc8"
Nov 22 07:31:33 crc kubenswrapper[4929]: I1122 07:31:33.168201 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h7h67"
Nov 22 07:31:33 crc kubenswrapper[4929]: I1122 07:31:33.398476 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-58f887965d-9lr2t"
Nov 22 07:31:33 crc kubenswrapper[4929]: I1122 07:31:33.518591 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-7969689c84-b2l86"
Nov 22 07:31:33 crc kubenswrapper[4929]: I1122 07:31:33.978892 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-b4c496f69-g64vx"
Nov 22 07:31:34 crc kubenswrapper[4929]: I1122 07:31:34.801472 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-9tqpv" event={"ID":"80a127c3-bf01-4a74-8f00-9226fbb485cd","Type":"ContainerStarted","Data":"79eafd459534512640090a8b33a682e8452aca0a5c86de86b0bc362e08113a02"}
Nov 22 07:31:34 crc kubenswrapper[4929]: I1122 07:31:34.802614 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-9tqpv"
Nov 22 07:31:34 crc kubenswrapper[4929]: I1122 07:31:34.804404 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-djv62" event={"ID":"4683d9d2-95c8-4fa3-a710-9b03cce32d1f","Type":"ContainerStarted","Data":"c302cdd49cac91d5351ea185cee07bd5849dccb09c71c2ee6c38fb698f71dd9f"}
Nov 22 07:31:34 crc kubenswrapper[4929]: I1122 07:31:34.806275 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-4ssf8" event={"ID":"bf7b0c38-b9e7-4036-a9c0-9494b65c8714","Type":"ContainerStarted","Data":"afdb7ec91d99e0276f5ead6547c458f2b9bc72a67f1f7b80c880d798b53d4229"}
Nov 22 07:31:34 crc kubenswrapper[4929]: I1122 07:31:34.806425 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-4ssf8"
Nov 22 07:31:34 crc kubenswrapper[4929]: I1122 07:31:34.825359 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-9tqpv" podStartSLOduration=4.673617513 podStartE2EDuration="1m2.825342882s" podCreationTimestamp="2025-11-22 07:30:32 +0000 UTC" firstStartedPulling="2025-11-22 07:30:35.730195929 +0000 UTC m=+1172.839649932" lastFinishedPulling="2025-11-22 07:31:33.881921288 +0000 UTC m=+1230.991375301" observedRunningTime="2025-11-22 07:31:34.821372322 +0000 UTC m=+1231.930826335" watchObservedRunningTime="2025-11-22 07:31:34.825342882 +0000 UTC m=+1231.934796895"
Nov 22 07:31:34 crc kubenswrapper[4929]: I1122 07:31:34.853505 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-djv62" podStartSLOduration=4.130765458 podStartE2EDuration="1m1.853482462s" podCreationTimestamp="2025-11-22 07:30:33 +0000 UTC" firstStartedPulling="2025-11-22 07:30:36.158137847 +0000 UTC m=+1173.267591860" lastFinishedPulling="2025-11-22 07:31:33.880854851 +0000 UTC m=+1230.990308864" observedRunningTime="2025-11-22 07:31:34.834661247 +0000 UTC m=+1231.944115260" watchObservedRunningTime="2025-11-22 07:31:34.853482462 +0000 UTC m=+1231.962936475"
Nov 22 07:31:34 crc kubenswrapper[4929]: I1122 07:31:34.854588 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-4ssf8" podStartSLOduration=3.747299933 podStartE2EDuration="1m1.85458167s" podCreationTimestamp="2025-11-22 07:30:33 +0000 UTC" firstStartedPulling="2025-11-22 07:30:35.773912512 +0000 UTC m=+1172.883366525" lastFinishedPulling="2025-11-22 07:31:33.881194249 +0000 UTC m=+1230.990648262" observedRunningTime="2025-11-22 07:31:34.84826775 +0000 UTC m=+1231.957721773" watchObservedRunningTime="2025-11-22 07:31:34.85458167 +0000 UTC m=+1231.964035683"
Nov 22 07:31:36 crc kubenswrapper[4929]: I1122 07:31:36.824798 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-zldhw" event={"ID":"3db96ba7-8db5-4603-bab2-bafe30ce4874","Type":"ContainerStarted","Data":"e0b803dfb4889436eeb2c5fc7ef2b6960c54d35f823f0f6ba8c0a12856b05e93"}
Nov 22 07:31:36 crc kubenswrapper[4929]: I1122 07:31:36.825351 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-zldhw"
Nov 22 07:31:36 crc kubenswrapper[4929]: I1122 07:31:36.849324 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-zldhw" podStartSLOduration=4.476436949 podStartE2EDuration="1m4.849291892s" podCreationTimestamp="2025-11-22 07:30:32 +0000 UTC" firstStartedPulling="2025-11-22 07:30:35.707023744 +0000 UTC m=+1172.816477757" lastFinishedPulling="2025-11-22 07:31:36.079878647 +0000 UTC m=+1233.189332700" observedRunningTime="2025-11-22 07:31:36.840128171 +0000 UTC m=+1233.949582184" watchObservedRunningTime="2025-11-22 07:31:36.849291892 +0000 UTC m=+1233.958745945"
Nov 22 07:31:43 crc kubenswrapper[4929]: I1122 07:31:43.153858 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-75fb479bcc-hlxc8"
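The repeated "Error syncing pod, skipping ... ImagePullBackOff" entries for the same pods above are the kubelet's image pull back-off: each failed pull roughly doubles the wait before the next attempt. A sketch of that schedule, assuming the kubelet defaults of a 10s initial back-off doubling up to a 5m cap (the constants themselves are not visible in this log):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const maxDelay = 5 * time.Minute // assumed cap, not read from this log
	delay := 10 * time.Second        // assumed initial back-off
	for i := 0; i < 6; i++ {
		fmt.Println(delay) // 10s 20s 40s 1m20s 2m40s 5m0s
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}
```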
pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-h7h67" Nov 22 07:31:43 crc kubenswrapper[4929]: I1122 07:31:43.440890 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-54b5986bb8-dzhvl" Nov 22 07:31:43 crc kubenswrapper[4929]: I1122 07:31:43.477110 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-78bd47f458-knndh" Nov 22 07:31:43 crc kubenswrapper[4929]: I1122 07:31:43.500327 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-zldhw" Nov 22 07:31:43 crc kubenswrapper[4929]: I1122 07:31:43.525381 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-7969689c84-b2l86" Nov 22 07:31:43 crc kubenswrapper[4929]: I1122 07:31:43.567002 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-9tqpv" Nov 22 07:31:43 crc kubenswrapper[4929]: I1122 07:31:43.675894 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-4ssf8" Nov 22 07:31:43 crc kubenswrapper[4929]: I1122 07:31:43.904556 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-xh4kd" Nov 22 07:31:43 crc kubenswrapper[4929]: I1122 07:31:43.981886 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-b4c496f69-g64vx" Nov 22 07:31:48 crc kubenswrapper[4929]: I1122 07:31:48.594832 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:31:48 crc kubenswrapper[4929]: I1122 07:31:48.595176 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:32:03 crc kubenswrapper[4929]: E1122 07:32:03.913098 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.196:5001/openstack-k8s-operators/watcher-operator:05df33742087a9ad8e8dd92626b1730a09013b29" Nov 22 07:32:03 crc kubenswrapper[4929]: E1122 07:32:03.913706 4929 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.196:5001/openstack-k8s-operators/watcher-operator:05df33742087a9ad8e8dd92626b1730a09013b29" Nov 22 07:32:03 crc kubenswrapper[4929]: E1122 07:32:03.913913 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.196:5001/openstack-k8s-operators/watcher-operator:05df33742087a9ad8e8dd92626b1730a09013b29,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 
--leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cwck2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-747b777d6b-wwnbh_openstack-operators(62c58af8-c121-43e3-b91c-524e2d2783bd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:32:03 crc kubenswrapper[4929]: E1122 07:32:03.916382 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh" podUID="62c58af8-c121-43e3-b91c-524e2d2783bd" Nov 22 07:32:04 crc kubenswrapper[4929]: I1122 07:32:04.057849 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" event={"ID":"d9191fcc-97a0-4e23-bc96-89dfbd474f25","Type":"ContainerStarted","Data":"c394968ffefcc8d9ca9ae409d9fc86624821a4d63d953e7f9787d97cb038a9da"} Nov 22 07:32:04 crc kubenswrapper[4929]: I1122 07:32:04.058262 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" Nov 22 07:32:04 crc kubenswrapper[4929]: I1122 07:32:04.060315 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-ckg8c" event={"ID":"5eb78153-a103-4885-b710-406198b25403","Type":"ContainerStarted","Data":"c5cfa7924b830bcaf2181e910c3f3b147712dbe631e3281a25dbfaec6522bd6b"} Nov 22 07:32:04 crc kubenswrapper[4929]: I1122 07:32:04.060868 4929 
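The &Container{...} dump in the "Unhandled Error" entry above is the kubelet's Go rendering of the watcher-operator manager container spec. The same spec rewritten as a client-go literal, purely for readability (field values are copied from the dump; the k8s.io package paths and the omission of fields the dump leaves at their zero values are this sketch's assumptions):

```go
package sketch

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// manager mirrors the container spec logged above for
// watcher-operator-controller-manager-747b777d6b-wwnbh.
var manager = corev1.Container{
	Name:    "manager",
	Image:   "38.102.83.196:5001/openstack-k8s-operators/watcher-operator:05df33742087a9ad8e8dd92626b1730a09013b29",
	Command: []string{"/manager"},
	Args: []string{
		"--health-probe-bind-address=:8081",
		"--metrics-bind-address=127.0.0.1:8080",
		"--leader-elect",
	},
	Env: []corev1.EnvVar{
		{Name: "LEASE_DURATION", Value: "30"},
		{Name: "RENEW_DEADLINE", Value: "20"},
		{Name: "RETRY_PERIOD", Value: "5"},
		{Name: "ENABLE_WEBHOOKS", Value: "false"},
	},
	Resources: corev1.ResourceRequirements{
		// {{500 -3}} and {{536870912 0}} in the dump are 500m and 512Mi.
		Limits: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("500m"),
			corev1.ResourceMemory: resource.MustParse("512Mi"),
		},
		Requests: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("10m"),
			corev1.ResourceMemory: resource.MustParse("256Mi"),
		},
	},
	VolumeMounts: []corev1.VolumeMount{
		{Name: "kube-api-access-cwck2", ReadOnly: true, MountPath: "/var/run/secrets/kubernetes.io/serviceaccount"},
	},
	LivenessProbe: &corev1.Probe{
		ProbeHandler:        corev1.ProbeHandler{HTTPGet: &corev1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8081)}},
		InitialDelaySeconds: 15, TimeoutSeconds: 1, PeriodSeconds: 20, SuccessThreshold: 1, FailureThreshold: 3,
	},
	ReadinessProbe: &corev1.Probe{
		ProbeHandler:        corev1.ProbeHandler{HTTPGet: &corev1.HTTPGetAction{Path: "/readyz", Port: intstr.FromInt(8081)}},
		InitialDelaySeconds: 5, TimeoutSeconds: 1, PeriodSeconds: 10, SuccessThreshold: 1, FailureThreshold: 3,
	},
	ImagePullPolicy: corev1.PullIfNotPresent,
}
```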
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-ckg8c" Nov 22 07:32:04 crc kubenswrapper[4929]: I1122 07:32:04.091766 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" podStartSLOduration=3.345008292 podStartE2EDuration="1m31.091746822s" podCreationTimestamp="2025-11-22 07:30:33 +0000 UTC" firstStartedPulling="2025-11-22 07:30:35.988679461 +0000 UTC m=+1173.098133474" lastFinishedPulling="2025-11-22 07:32:03.735417991 +0000 UTC m=+1260.844872004" observedRunningTime="2025-11-22 07:32:04.087670829 +0000 UTC m=+1261.197124832" watchObservedRunningTime="2025-11-22 07:32:04.091746822 +0000 UTC m=+1261.201200835" Nov 22 07:32:04 crc kubenswrapper[4929]: I1122 07:32:04.109593 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-ckg8c" podStartSLOduration=4.410382572 podStartE2EDuration="1m32.109572202s" podCreationTimestamp="2025-11-22 07:30:32 +0000 UTC" firstStartedPulling="2025-11-22 07:30:36.037669737 +0000 UTC m=+1173.147123750" lastFinishedPulling="2025-11-22 07:32:03.736859367 +0000 UTC m=+1260.846313380" observedRunningTime="2025-11-22 07:32:04.10277133 +0000 UTC m=+1261.212225343" watchObservedRunningTime="2025-11-22 07:32:04.109572202 +0000 UTC m=+1261.219026215" Nov 22 07:32:13 crc kubenswrapper[4929]: I1122 07:32:13.569838 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-ckg8c" Nov 22 07:32:14 crc kubenswrapper[4929]: I1122 07:32:14.404100 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz" Nov 22 07:32:15 crc kubenswrapper[4929]: E1122 07:32:15.950538 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.196:5001/openstack-k8s-operators/watcher-operator:05df33742087a9ad8e8dd92626b1730a09013b29\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh" podUID="62c58af8-c121-43e3-b91c-524e2d2783bd" Nov 22 07:32:18 crc kubenswrapper[4929]: I1122 07:32:18.594676 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:32:18 crc kubenswrapper[4929]: I1122 07:32:18.595030 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:32:18 crc kubenswrapper[4929]: I1122 07:32:18.595077 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 07:32:18 crc kubenswrapper[4929]: I1122 07:32:18.595671 4929 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"a1361e4489b9f245e60757542f43a3174963376402a0adf9696f3bcc58d868f1"} pod="openshift-machine-config-operator/machine-config-daemon-dssfx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 07:32:18 crc kubenswrapper[4929]: I1122 07:32:18.595733 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" containerID="cri-o://a1361e4489b9f245e60757542f43a3174963376402a0adf9696f3bcc58d868f1" gracePeriod=600 Nov 22 07:32:19 crc kubenswrapper[4929]: I1122 07:32:19.198229 4929 generic.go:334] "Generic (PLEG): container finished" podID="470531cb-120c-48d9-80e1-adf074cf3055" containerID="a1361e4489b9f245e60757542f43a3174963376402a0adf9696f3bcc58d868f1" exitCode=0 Nov 22 07:32:19 crc kubenswrapper[4929]: I1122 07:32:19.198774 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerDied","Data":"a1361e4489b9f245e60757542f43a3174963376402a0adf9696f3bcc58d868f1"} Nov 22 07:32:19 crc kubenswrapper[4929]: I1122 07:32:19.198808 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"5a8cd91f718685551c87c3db0c1786abf5a0657ef65dfd00afe7c250421c651c"} Nov 22 07:32:19 crc kubenswrapper[4929]: I1122 07:32:19.198828 4929 scope.go:117] "RemoveContainer" containerID="feb5786acfc814aec51c20213fe9cfe83614e95b20f84e104dde0c09f6c7dd83" Nov 22 07:32:28 crc kubenswrapper[4929]: E1122 07:32:28.949516 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.196:5001/openstack-k8s-operators/watcher-operator:05df33742087a9ad8e8dd92626b1730a09013b29\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh" podUID="62c58af8-c121-43e3-b91c-524e2d2783bd" Nov 22 07:32:40 crc kubenswrapper[4929]: E1122 07:32:40.949422 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.196:5001/openstack-k8s-operators/watcher-operator:05df33742087a9ad8e8dd92626b1730a09013b29\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh" podUID="62c58af8-c121-43e3-b91c-524e2d2783bd" Nov 22 07:32:57 crc kubenswrapper[4929]: I1122 07:32:57.498314 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh" event={"ID":"62c58af8-c121-43e3-b91c-524e2d2783bd","Type":"ContainerStarted","Data":"b751b19b90e05515aaf910952e5053d0ab8f4761016751081450f12283395b63"} Nov 22 07:32:57 crc kubenswrapper[4929]: I1122 07:32:57.499960 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh" Nov 22 07:32:57 crc kubenswrapper[4929]: I1122 07:32:57.519448 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh" podStartSLOduration=3.8254353070000002 podStartE2EDuration="2m24.519424278s" 
podCreationTimestamp="2025-11-22 07:30:33 +0000 UTC" firstStartedPulling="2025-11-22 07:30:35.98069847 +0000 UTC m=+1173.090152483" lastFinishedPulling="2025-11-22 07:32:56.674687441 +0000 UTC m=+1313.784141454" observedRunningTime="2025-11-22 07:32:57.517686236 +0000 UTC m=+1314.627140249" watchObservedRunningTime="2025-11-22 07:32:57.519424278 +0000 UTC m=+1314.628878301" Nov 22 07:33:04 crc kubenswrapper[4929]: I1122 07:33:04.102551 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-747b777d6b-wwnbh" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.504869 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-cx86p"] Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.509412 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-cx86p" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.516166 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.516481 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-k2lvs" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.516665 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.522715 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.522811 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-cx86p"] Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.564095 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7mwv\" (UniqueName: \"kubernetes.io/projected/a8e87ff1-3774-401d-ae24-728b40319f22-kube-api-access-z7mwv\") pod \"dnsmasq-dns-675f4bcbfc-cx86p\" (UID: \"a8e87ff1-3774-401d-ae24-728b40319f22\") " pod="openstack/dnsmasq-dns-675f4bcbfc-cx86p" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.564433 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8e87ff1-3774-401d-ae24-728b40319f22-config\") pod \"dnsmasq-dns-675f4bcbfc-cx86p\" (UID: \"a8e87ff1-3774-401d-ae24-728b40319f22\") " pod="openstack/dnsmasq-dns-675f4bcbfc-cx86p" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.572818 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-wlblj"] Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.574852 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-wlblj" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.579912 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-wlblj"] Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.580632 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.665909 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7mwv\" (UniqueName: \"kubernetes.io/projected/a8e87ff1-3774-401d-ae24-728b40319f22-kube-api-access-z7mwv\") pod \"dnsmasq-dns-675f4bcbfc-cx86p\" (UID: \"a8e87ff1-3774-401d-ae24-728b40319f22\") " pod="openstack/dnsmasq-dns-675f4bcbfc-cx86p" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.665976 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57539eb2-2c3a-4aa4-bcbd-838621769f35-config\") pod \"dnsmasq-dns-78dd6ddcc-wlblj\" (UID: \"57539eb2-2c3a-4aa4-bcbd-838621769f35\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wlblj" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.666012 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8e87ff1-3774-401d-ae24-728b40319f22-config\") pod \"dnsmasq-dns-675f4bcbfc-cx86p\" (UID: \"a8e87ff1-3774-401d-ae24-728b40319f22\") " pod="openstack/dnsmasq-dns-675f4bcbfc-cx86p" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.666063 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/57539eb2-2c3a-4aa4-bcbd-838621769f35-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-wlblj\" (UID: \"57539eb2-2c3a-4aa4-bcbd-838621769f35\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wlblj" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.666112 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ddsr\" (UniqueName: \"kubernetes.io/projected/57539eb2-2c3a-4aa4-bcbd-838621769f35-kube-api-access-5ddsr\") pod \"dnsmasq-dns-78dd6ddcc-wlblj\" (UID: \"57539eb2-2c3a-4aa4-bcbd-838621769f35\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wlblj" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.667444 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8e87ff1-3774-401d-ae24-728b40319f22-config\") pod \"dnsmasq-dns-675f4bcbfc-cx86p\" (UID: \"a8e87ff1-3774-401d-ae24-728b40319f22\") " pod="openstack/dnsmasq-dns-675f4bcbfc-cx86p" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.687188 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7mwv\" (UniqueName: \"kubernetes.io/projected/a8e87ff1-3774-401d-ae24-728b40319f22-kube-api-access-z7mwv\") pod \"dnsmasq-dns-675f4bcbfc-cx86p\" (UID: \"a8e87ff1-3774-401d-ae24-728b40319f22\") " pod="openstack/dnsmasq-dns-675f4bcbfc-cx86p" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.766877 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ddsr\" (UniqueName: \"kubernetes.io/projected/57539eb2-2c3a-4aa4-bcbd-838621769f35-kube-api-access-5ddsr\") pod \"dnsmasq-dns-78dd6ddcc-wlblj\" (UID: \"57539eb2-2c3a-4aa4-bcbd-838621769f35\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wlblj" Nov 22 07:33:23 crc 
kubenswrapper[4929]: I1122 07:33:23.767252 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57539eb2-2c3a-4aa4-bcbd-838621769f35-config\") pod \"dnsmasq-dns-78dd6ddcc-wlblj\" (UID: \"57539eb2-2c3a-4aa4-bcbd-838621769f35\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wlblj" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.767304 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/57539eb2-2c3a-4aa4-bcbd-838621769f35-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-wlblj\" (UID: \"57539eb2-2c3a-4aa4-bcbd-838621769f35\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wlblj" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.768170 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/57539eb2-2c3a-4aa4-bcbd-838621769f35-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-wlblj\" (UID: \"57539eb2-2c3a-4aa4-bcbd-838621769f35\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wlblj" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.769448 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57539eb2-2c3a-4aa4-bcbd-838621769f35-config\") pod \"dnsmasq-dns-78dd6ddcc-wlblj\" (UID: \"57539eb2-2c3a-4aa4-bcbd-838621769f35\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wlblj" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.786712 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ddsr\" (UniqueName: \"kubernetes.io/projected/57539eb2-2c3a-4aa4-bcbd-838621769f35-kube-api-access-5ddsr\") pod \"dnsmasq-dns-78dd6ddcc-wlblj\" (UID: \"57539eb2-2c3a-4aa4-bcbd-838621769f35\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wlblj" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.847480 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-cx86p" Nov 22 07:33:23 crc kubenswrapper[4929]: I1122 07:33:23.894380 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-wlblj" Nov 22 07:33:24 crc kubenswrapper[4929]: I1122 07:33:24.278684 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-cx86p"] Nov 22 07:33:24 crc kubenswrapper[4929]: I1122 07:33:24.357335 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-wlblj"] Nov 22 07:33:24 crc kubenswrapper[4929]: W1122 07:33:24.360349 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod57539eb2_2c3a_4aa4_bcbd_838621769f35.slice/crio-e09de2e2e368ce67ca34af08c5326c99c3f6566a62b3ace509e3e9bc7a3e1608 WatchSource:0}: Error finding container e09de2e2e368ce67ca34af08c5326c99c3f6566a62b3ace509e3e9bc7a3e1608: Status 404 returned error can't find the container with id e09de2e2e368ce67ca34af08c5326c99c3f6566a62b3ace509e3e9bc7a3e1608 Nov 22 07:33:24 crc kubenswrapper[4929]: I1122 07:33:24.770291 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-cx86p" event={"ID":"a8e87ff1-3774-401d-ae24-728b40319f22","Type":"ContainerStarted","Data":"985d8f5c1d37142789b073ee36af8cc151ca4a182367d4dffb2c171829614694"} Nov 22 07:33:24 crc kubenswrapper[4929]: I1122 07:33:24.771502 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-wlblj" event={"ID":"57539eb2-2c3a-4aa4-bcbd-838621769f35","Type":"ContainerStarted","Data":"e09de2e2e368ce67ca34af08c5326c99c3f6566a62b3ace509e3e9bc7a3e1608"} Nov 22 07:33:26 crc kubenswrapper[4929]: I1122 07:33:26.667188 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-cx86p"] Nov 22 07:33:26 crc kubenswrapper[4929]: I1122 07:33:26.689972 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-ddtc6"] Nov 22 07:33:26 crc kubenswrapper[4929]: I1122 07:33:26.691135 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" Nov 22 07:33:26 crc kubenswrapper[4929]: I1122 07:33:26.698415 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-ddtc6"] Nov 22 07:33:26 crc kubenswrapper[4929]: I1122 07:33:26.815389 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b99c6a04-d311-41c0-9b1e-675bcbb75aeb-dns-svc\") pod \"dnsmasq-dns-666b6646f7-ddtc6\" (UID: \"b99c6a04-d311-41c0-9b1e-675bcbb75aeb\") " pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" Nov 22 07:33:26 crc kubenswrapper[4929]: I1122 07:33:26.815463 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5v7cm\" (UniqueName: \"kubernetes.io/projected/b99c6a04-d311-41c0-9b1e-675bcbb75aeb-kube-api-access-5v7cm\") pod \"dnsmasq-dns-666b6646f7-ddtc6\" (UID: \"b99c6a04-d311-41c0-9b1e-675bcbb75aeb\") " pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" Nov 22 07:33:26 crc kubenswrapper[4929]: I1122 07:33:26.815503 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b99c6a04-d311-41c0-9b1e-675bcbb75aeb-config\") pod \"dnsmasq-dns-666b6646f7-ddtc6\" (UID: \"b99c6a04-d311-41c0-9b1e-675bcbb75aeb\") " pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" Nov 22 07:33:26 crc kubenswrapper[4929]: I1122 07:33:26.917167 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b99c6a04-d311-41c0-9b1e-675bcbb75aeb-dns-svc\") pod \"dnsmasq-dns-666b6646f7-ddtc6\" (UID: \"b99c6a04-d311-41c0-9b1e-675bcbb75aeb\") " pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" Nov 22 07:33:26 crc kubenswrapper[4929]: I1122 07:33:26.917308 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5v7cm\" (UniqueName: \"kubernetes.io/projected/b99c6a04-d311-41c0-9b1e-675bcbb75aeb-kube-api-access-5v7cm\") pod \"dnsmasq-dns-666b6646f7-ddtc6\" (UID: \"b99c6a04-d311-41c0-9b1e-675bcbb75aeb\") " pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" Nov 22 07:33:26 crc kubenswrapper[4929]: I1122 07:33:26.917347 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b99c6a04-d311-41c0-9b1e-675bcbb75aeb-config\") pod \"dnsmasq-dns-666b6646f7-ddtc6\" (UID: \"b99c6a04-d311-41c0-9b1e-675bcbb75aeb\") " pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" Nov 22 07:33:26 crc kubenswrapper[4929]: I1122 07:33:26.918262 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b99c6a04-d311-41c0-9b1e-675bcbb75aeb-config\") pod \"dnsmasq-dns-666b6646f7-ddtc6\" (UID: \"b99c6a04-d311-41c0-9b1e-675bcbb75aeb\") " pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" Nov 22 07:33:26 crc kubenswrapper[4929]: I1122 07:33:26.918797 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b99c6a04-d311-41c0-9b1e-675bcbb75aeb-dns-svc\") pod \"dnsmasq-dns-666b6646f7-ddtc6\" (UID: \"b99c6a04-d311-41c0-9b1e-675bcbb75aeb\") " pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" Nov 22 07:33:26 crc kubenswrapper[4929]: I1122 07:33:26.966689 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5v7cm\" (UniqueName: 
\"kubernetes.io/projected/b99c6a04-d311-41c0-9b1e-675bcbb75aeb-kube-api-access-5v7cm\") pod \"dnsmasq-dns-666b6646f7-ddtc6\" (UID: \"b99c6a04-d311-41c0-9b1e-675bcbb75aeb\") " pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" Nov 22 07:33:26 crc kubenswrapper[4929]: I1122 07:33:26.972627 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-wlblj"] Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.008514 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-pdvz8"] Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.015625 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-pdvz8" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.021083 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-pdvz8"] Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.028748 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.120725 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zhb8\" (UniqueName: \"kubernetes.io/projected/8e420a12-2fa0-41d7-97f0-1045b245bcea-kube-api-access-7zhb8\") pod \"dnsmasq-dns-57d769cc4f-pdvz8\" (UID: \"8e420a12-2fa0-41d7-97f0-1045b245bcea\") " pod="openstack/dnsmasq-dns-57d769cc4f-pdvz8" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.121116 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e420a12-2fa0-41d7-97f0-1045b245bcea-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-pdvz8\" (UID: \"8e420a12-2fa0-41d7-97f0-1045b245bcea\") " pod="openstack/dnsmasq-dns-57d769cc4f-pdvz8" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.121184 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e420a12-2fa0-41d7-97f0-1045b245bcea-config\") pod \"dnsmasq-dns-57d769cc4f-pdvz8\" (UID: \"8e420a12-2fa0-41d7-97f0-1045b245bcea\") " pod="openstack/dnsmasq-dns-57d769cc4f-pdvz8" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.222181 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zhb8\" (UniqueName: \"kubernetes.io/projected/8e420a12-2fa0-41d7-97f0-1045b245bcea-kube-api-access-7zhb8\") pod \"dnsmasq-dns-57d769cc4f-pdvz8\" (UID: \"8e420a12-2fa0-41d7-97f0-1045b245bcea\") " pod="openstack/dnsmasq-dns-57d769cc4f-pdvz8" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.222242 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e420a12-2fa0-41d7-97f0-1045b245bcea-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-pdvz8\" (UID: \"8e420a12-2fa0-41d7-97f0-1045b245bcea\") " pod="openstack/dnsmasq-dns-57d769cc4f-pdvz8" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.222287 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e420a12-2fa0-41d7-97f0-1045b245bcea-config\") pod \"dnsmasq-dns-57d769cc4f-pdvz8\" (UID: \"8e420a12-2fa0-41d7-97f0-1045b245bcea\") " pod="openstack/dnsmasq-dns-57d769cc4f-pdvz8" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.224318 4929 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e420a12-2fa0-41d7-97f0-1045b245bcea-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-pdvz8\" (UID: \"8e420a12-2fa0-41d7-97f0-1045b245bcea\") " pod="openstack/dnsmasq-dns-57d769cc4f-pdvz8" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.225056 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e420a12-2fa0-41d7-97f0-1045b245bcea-config\") pod \"dnsmasq-dns-57d769cc4f-pdvz8\" (UID: \"8e420a12-2fa0-41d7-97f0-1045b245bcea\") " pod="openstack/dnsmasq-dns-57d769cc4f-pdvz8" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.260741 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zhb8\" (UniqueName: \"kubernetes.io/projected/8e420a12-2fa0-41d7-97f0-1045b245bcea-kube-api-access-7zhb8\") pod \"dnsmasq-dns-57d769cc4f-pdvz8\" (UID: \"8e420a12-2fa0-41d7-97f0-1045b245bcea\") " pod="openstack/dnsmasq-dns-57d769cc4f-pdvz8" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.351718 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-pdvz8" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.627135 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-ddtc6"] Nov 22 07:33:27 crc kubenswrapper[4929]: W1122 07:33:27.628429 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb99c6a04_d311_41c0_9b1e_675bcbb75aeb.slice/crio-addff9a46bdf74a40a7506137efdebb39a52faee5600b849d5bee4f1ca13c021 WatchSource:0}: Error finding container addff9a46bdf74a40a7506137efdebb39a52faee5600b849d5bee4f1ca13c021: Status 404 returned error can't find the container with id addff9a46bdf74a40a7506137efdebb39a52faee5600b849d5bee4f1ca13c021 Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.651146 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-pdvz8"] Nov 22 07:33:27 crc kubenswrapper[4929]: W1122 07:33:27.661587 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8e420a12_2fa0_41d7_97f0_1045b245bcea.slice/crio-ba6a5581846253e32b8913ade3ce04b74f33732244532929fa1eb7367069e314 WatchSource:0}: Error finding container ba6a5581846253e32b8913ade3ce04b74f33732244532929fa1eb7367069e314: Status 404 returned error can't find the container with id ba6a5581846253e32b8913ade3ce04b74f33732244532929fa1eb7367069e314 Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.797788 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" event={"ID":"b99c6a04-d311-41c0-9b1e-675bcbb75aeb","Type":"ContainerStarted","Data":"addff9a46bdf74a40a7506137efdebb39a52faee5600b849d5bee4f1ca13c021"} Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.800291 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-pdvz8" event={"ID":"8e420a12-2fa0-41d7-97f0-1045b245bcea","Type":"ContainerStarted","Data":"ba6a5581846253e32b8913ade3ce04b74f33732244532929fa1eb7367069e314"} Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.848814 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.850958 4929 util.go:30] "No sandbox for pod can be found. 
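Each dnsmasq-dns pod above goes through the same reconciler sequence: VerifyControllerAttachedVolume, then MountVolume started, then MountVolume.SetUp succeeded, for two ConfigMap volumes (config, dns-svc) and one projected kube-api-access-* token volume. As a volume-list sketch (the backing ConfigMap names "dns" and "dns-svc" are assumed from the reflector cache entries above; the projected token volume is the standard service-account shape, not read from the pod spec):

```go
package sketch

import corev1 "k8s.io/api/core/v1"

// Volumes mounted for dnsmasq-dns-57d769cc4f-pdvz8 above.
var dnsmasqVolumes = []corev1.Volume{
	{Name: "config", VolumeSource: corev1.VolumeSource{
		ConfigMap: &corev1.ConfigMapVolumeSource{
			LocalObjectReference: corev1.LocalObjectReference{Name: "dns"}, // assumed
		},
	}},
	{Name: "dns-svc", VolumeSource: corev1.VolumeSource{
		ConfigMap: &corev1.ConfigMapVolumeSource{
			LocalObjectReference: corev1.LocalObjectReference{Name: "dns-svc"}, // assumed
		},
	}},
	// kube-api-access-* is the auto-generated projected service-account
	// token volume: a bound token plus the kube-root-ca.crt bundle.
	{Name: "kube-api-access-7zhb8", VolumeSource: corev1.VolumeSource{
		Projected: &corev1.ProjectedVolumeSource{
			Sources: []corev1.VolumeProjection{
				{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{Path: "token"}},
				{ConfigMap: &corev1.ConfigMapProjection{
					LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
				}},
			},
		},
	}},
}
```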
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.854999 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.855029 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.855681 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.855758 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-pbcs9" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.855784 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.855865 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.856037 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.868295 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.936183 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/76073571-729d-4de5-bda6-780d28ae6a9b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.936288 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/76073571-729d-4de5-bda6-780d28ae6a9b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.936325 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/76073571-729d-4de5-bda6-780d28ae6a9b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.936363 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txjkf\" (UniqueName: \"kubernetes.io/projected/76073571-729d-4de5-bda6-780d28ae6a9b-kube-api-access-txjkf\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.936515 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/76073571-729d-4de5-bda6-780d28ae6a9b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.936612 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.936691 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/76073571-729d-4de5-bda6-780d28ae6a9b-config-data\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.936722 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/76073571-729d-4de5-bda6-780d28ae6a9b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.936772 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/76073571-729d-4de5-bda6-780d28ae6a9b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.936892 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/76073571-729d-4de5-bda6-780d28ae6a9b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:27 crc kubenswrapper[4929]: I1122 07:33:27.936971 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/76073571-729d-4de5-bda6-780d28ae6a9b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.038461 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/76073571-729d-4de5-bda6-780d28ae6a9b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.038495 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/76073571-729d-4de5-bda6-780d28ae6a9b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.038522 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txjkf\" (UniqueName: \"kubernetes.io/projected/76073571-729d-4de5-bda6-780d28ae6a9b-kube-api-access-txjkf\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.038564 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/76073571-729d-4de5-bda6-780d28ae6a9b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " 
pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.038595 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.038641 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/76073571-729d-4de5-bda6-780d28ae6a9b-config-data\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.038684 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/76073571-729d-4de5-bda6-780d28ae6a9b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.038717 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/76073571-729d-4de5-bda6-780d28ae6a9b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.038737 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/76073571-729d-4de5-bda6-780d28ae6a9b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.038752 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/76073571-729d-4de5-bda6-780d28ae6a9b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.038802 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/76073571-729d-4de5-bda6-780d28ae6a9b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.039752 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/76073571-729d-4de5-bda6-780d28ae6a9b-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.040106 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/76073571-729d-4de5-bda6-780d28ae6a9b-config-data\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.040321 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/76073571-729d-4de5-bda6-780d28ae6a9b-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.040445 4929 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.040711 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/76073571-729d-4de5-bda6-780d28ae6a9b-server-conf\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.041123 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/76073571-729d-4de5-bda6-780d28ae6a9b-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.045726 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/76073571-729d-4de5-bda6-780d28ae6a9b-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.045765 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/76073571-729d-4de5-bda6-780d28ae6a9b-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.055380 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/76073571-729d-4de5-bda6-780d28ae6a9b-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.055429 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/76073571-729d-4de5-bda6-780d28ae6a9b-pod-info\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.058024 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txjkf\" (UniqueName: \"kubernetes.io/projected/76073571-729d-4de5-bda6-780d28ae6a9b-kube-api-access-txjkf\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.079756 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"76073571-729d-4de5-bda6-780d28ae6a9b\") " pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.123796 4929 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.129726 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.132231 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.132273 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-hfnj5" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.132282 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.132423 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.134408 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.135868 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.136265 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.136336 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.183757 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.242178 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/941ef061-1085-45ca-84e2-60447bb10c47-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.242280 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.242304 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/941ef061-1085-45ca-84e2-60447bb10c47-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.242341 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/941ef061-1085-45ca-84e2-60447bb10c47-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.242355 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/941ef061-1085-45ca-84e2-60447bb10c47-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.242373 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qzl8\" (UniqueName: \"kubernetes.io/projected/941ef061-1085-45ca-84e2-60447bb10c47-kube-api-access-7qzl8\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.242389 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/941ef061-1085-45ca-84e2-60447bb10c47-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.243034 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/941ef061-1085-45ca-84e2-60447bb10c47-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.243101 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/941ef061-1085-45ca-84e2-60447bb10c47-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.243139 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/941ef061-1085-45ca-84e2-60447bb10c47-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.243172 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/941ef061-1085-45ca-84e2-60447bb10c47-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.344483 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/941ef061-1085-45ca-84e2-60447bb10c47-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.344777 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/941ef061-1085-45ca-84e2-60447bb10c47-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.344797 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qzl8\" (UniqueName: 
\"kubernetes.io/projected/941ef061-1085-45ca-84e2-60447bb10c47-kube-api-access-7qzl8\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.344812 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/941ef061-1085-45ca-84e2-60447bb10c47-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.344830 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/941ef061-1085-45ca-84e2-60447bb10c47-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.344856 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/941ef061-1085-45ca-84e2-60447bb10c47-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.344883 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/941ef061-1085-45ca-84e2-60447bb10c47-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.344907 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/941ef061-1085-45ca-84e2-60447bb10c47-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.344941 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/941ef061-1085-45ca-84e2-60447bb10c47-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.344978 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.344997 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/941ef061-1085-45ca-84e2-60447bb10c47-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.345467 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/941ef061-1085-45ca-84e2-60447bb10c47-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.346528 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/941ef061-1085-45ca-84e2-60447bb10c47-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.346676 4929 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.346989 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/941ef061-1085-45ca-84e2-60447bb10c47-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.347394 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/941ef061-1085-45ca-84e2-60447bb10c47-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.347393 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/941ef061-1085-45ca-84e2-60447bb10c47-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.349919 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/941ef061-1085-45ca-84e2-60447bb10c47-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.349938 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/941ef061-1085-45ca-84e2-60447bb10c47-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.350796 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/941ef061-1085-45ca-84e2-60447bb10c47-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.351763 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/941ef061-1085-45ca-84e2-60447bb10c47-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.368942 4929 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"kube-api-access-7qzl8\" (UniqueName: \"kubernetes.io/projected/941ef061-1085-45ca-84e2-60447bb10c47-kube-api-access-7qzl8\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.375872 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"941ef061-1085-45ca-84e2-60447bb10c47\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:28 crc kubenswrapper[4929]: I1122 07:33:28.458861 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.463860 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.470923 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.474902 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.475236 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.475373 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.475558 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-bl6xg" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.526887 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.528880 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.572836 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n82dd\" (UniqueName: \"kubernetes.io/projected/6a23e882-732c-416e-bfc5-c91517389f64-kube-api-access-n82dd\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.573090 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6a23e882-732c-416e-bfc5-c91517389f64-kolla-config\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.573188 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/6a23e882-732c-416e-bfc5-c91517389f64-config-data-generated\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.573305 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: 
\"kubernetes.io/configmap/6a23e882-732c-416e-bfc5-c91517389f64-config-data-default\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.573398 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/6a23e882-732c-416e-bfc5-c91517389f64-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.573470 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a23e882-732c-416e-bfc5-c91517389f64-operator-scripts\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.573570 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a23e882-732c-416e-bfc5-c91517389f64-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.573628 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.675363 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n82dd\" (UniqueName: \"kubernetes.io/projected/6a23e882-732c-416e-bfc5-c91517389f64-kube-api-access-n82dd\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.675434 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6a23e882-732c-416e-bfc5-c91517389f64-kolla-config\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.675484 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/6a23e882-732c-416e-bfc5-c91517389f64-config-data-generated\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.676368 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/6a23e882-732c-416e-bfc5-c91517389f64-config-data-default\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.676417 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6a23e882-732c-416e-bfc5-c91517389f64-kolla-config\") pod \"openstack-galera-0\" (UID: 
\"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.676530 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/6a23e882-732c-416e-bfc5-c91517389f64-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.676564 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/6a23e882-732c-416e-bfc5-c91517389f64-config-data-generated\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.676571 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a23e882-732c-416e-bfc5-c91517389f64-operator-scripts\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.676657 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a23e882-732c-416e-bfc5-c91517389f64-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.676699 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.676975 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/6a23e882-732c-416e-bfc5-c91517389f64-config-data-default\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.677011 4929 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.677805 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6a23e882-732c-416e-bfc5-c91517389f64-operator-scripts\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.685363 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a23e882-732c-416e-bfc5-c91517389f64-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.690577 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/6a23e882-732c-416e-bfc5-c91517389f64-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.710043 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n82dd\" (UniqueName: \"kubernetes.io/projected/6a23e882-732c-416e-bfc5-c91517389f64-kube-api-access-n82dd\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.719128 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"6a23e882-732c-416e-bfc5-c91517389f64\") " pod="openstack/openstack-galera-0" Nov 22 07:33:29 crc kubenswrapper[4929]: I1122 07:33:29.797189 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 22 07:33:30 crc kubenswrapper[4929]: I1122 07:33:30.917594 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 22 07:33:30 crc kubenswrapper[4929]: I1122 07:33:30.919716 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:30 crc kubenswrapper[4929]: I1122 07:33:30.921631 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 22 07:33:30 crc kubenswrapper[4929]: I1122 07:33:30.921776 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 22 07:33:30 crc kubenswrapper[4929]: I1122 07:33:30.921945 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 22 07:33:30 crc kubenswrapper[4929]: I1122 07:33:30.922054 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-v8tvl" Nov 22 07:33:30 crc kubenswrapper[4929]: I1122 07:33:30.932618 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 22 07:33:30 crc kubenswrapper[4929]: I1122 07:33:30.999406 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cq7w7\" (UniqueName: \"kubernetes.io/projected/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-kube-api-access-cq7w7\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:30 crc kubenswrapper[4929]: I1122 07:33:30.999468 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:30 crc kubenswrapper[4929]: I1122 07:33:30.999517 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:30 crc kubenswrapper[4929]: 
I1122 07:33:30.999730 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:30 crc kubenswrapper[4929]: I1122 07:33:30.999808 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:30 crc kubenswrapper[4929]: I1122 07:33:30.999869 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:30.999931 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.000023 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.102241 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.102697 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.102918 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.103189 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc 
kubenswrapper[4929]: I1122 07:33:31.103445 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.103809 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.104123 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cq7w7\" (UniqueName: \"kubernetes.io/projected/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-kube-api-access-cq7w7\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.104604 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.104752 4929 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.105203 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.105554 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.105752 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.109100 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.109760 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.119983 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.137984 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.146971 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cq7w7\" (UniqueName: \"kubernetes.io/projected/d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2-kube-api-access-cq7w7\") pod \"openstack-cell1-galera-0\" (UID: \"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2\") " pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.280096 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.289898 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.290975 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.292970 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.293138 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.294484 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-tq485" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.310305 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.409108 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/619c7624-23e8-4578-8361-b9c91a56e5c9-combined-ca-bundle\") pod \"memcached-0\" (UID: \"619c7624-23e8-4578-8361-b9c91a56e5c9\") " pod="openstack/memcached-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.409195 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/619c7624-23e8-4578-8361-b9c91a56e5c9-kolla-config\") pod \"memcached-0\" (UID: \"619c7624-23e8-4578-8361-b9c91a56e5c9\") " pod="openstack/memcached-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.409433 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/619c7624-23e8-4578-8361-b9c91a56e5c9-memcached-tls-certs\") pod \"memcached-0\" (UID: \"619c7624-23e8-4578-8361-b9c91a56e5c9\") " pod="openstack/memcached-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.409476 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/619c7624-23e8-4578-8361-b9c91a56e5c9-config-data\") pod \"memcached-0\" (UID: \"619c7624-23e8-4578-8361-b9c91a56e5c9\") " pod="openstack/memcached-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.409497 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkfhg\" (UniqueName: \"kubernetes.io/projected/619c7624-23e8-4578-8361-b9c91a56e5c9-kube-api-access-kkfhg\") pod \"memcached-0\" (UID: \"619c7624-23e8-4578-8361-b9c91a56e5c9\") " pod="openstack/memcached-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.512911 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/619c7624-23e8-4578-8361-b9c91a56e5c9-memcached-tls-certs\") pod \"memcached-0\" (UID: \"619c7624-23e8-4578-8361-b9c91a56e5c9\") " pod="openstack/memcached-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.512984 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkfhg\" (UniqueName: \"kubernetes.io/projected/619c7624-23e8-4578-8361-b9c91a56e5c9-kube-api-access-kkfhg\") pod \"memcached-0\" (UID: \"619c7624-23e8-4578-8361-b9c91a56e5c9\") " pod="openstack/memcached-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.513014 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/619c7624-23e8-4578-8361-b9c91a56e5c9-config-data\") pod \"memcached-0\" (UID: \"619c7624-23e8-4578-8361-b9c91a56e5c9\") " pod="openstack/memcached-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.513044 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/619c7624-23e8-4578-8361-b9c91a56e5c9-combined-ca-bundle\") pod \"memcached-0\" (UID: \"619c7624-23e8-4578-8361-b9c91a56e5c9\") " pod="openstack/memcached-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.513094 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/619c7624-23e8-4578-8361-b9c91a56e5c9-kolla-config\") pod \"memcached-0\" (UID: \"619c7624-23e8-4578-8361-b9c91a56e5c9\") " pod="openstack/memcached-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.514018 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/619c7624-23e8-4578-8361-b9c91a56e5c9-kolla-config\") pod \"memcached-0\" (UID: \"619c7624-23e8-4578-8361-b9c91a56e5c9\") " pod="openstack/memcached-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.516977 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/619c7624-23e8-4578-8361-b9c91a56e5c9-config-data\") pod \"memcached-0\" (UID: \"619c7624-23e8-4578-8361-b9c91a56e5c9\") " pod="openstack/memcached-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.518900 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/619c7624-23e8-4578-8361-b9c91a56e5c9-memcached-tls-certs\") pod \"memcached-0\" (UID: \"619c7624-23e8-4578-8361-b9c91a56e5c9\") " pod="openstack/memcached-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.520895 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/619c7624-23e8-4578-8361-b9c91a56e5c9-combined-ca-bundle\") pod \"memcached-0\" (UID: \"619c7624-23e8-4578-8361-b9c91a56e5c9\") " pod="openstack/memcached-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.532734 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkfhg\" (UniqueName: \"kubernetes.io/projected/619c7624-23e8-4578-8361-b9c91a56e5c9-kube-api-access-kkfhg\") pod \"memcached-0\" (UID: \"619c7624-23e8-4578-8361-b9c91a56e5c9\") " pod="openstack/memcached-0" Nov 22 07:33:31 crc kubenswrapper[4929]: I1122 07:33:31.614307 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 22 07:33:32 crc kubenswrapper[4929]: I1122 07:33:32.996178 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 07:33:32 crc kubenswrapper[4929]: I1122 07:33:32.997725 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 07:33:33 crc kubenswrapper[4929]: I1122 07:33:33.000448 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-x2qb7" Nov 22 07:33:33 crc kubenswrapper[4929]: I1122 07:33:33.011299 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 07:33:33 crc kubenswrapper[4929]: I1122 07:33:33.039963 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s97vp\" (UniqueName: \"kubernetes.io/projected/18c31ba0-cf68-45bf-87da-d04ab3bd8b21-kube-api-access-s97vp\") pod \"kube-state-metrics-0\" (UID: \"18c31ba0-cf68-45bf-87da-d04ab3bd8b21\") " pod="openstack/kube-state-metrics-0" Nov 22 07:33:33 crc kubenswrapper[4929]: I1122 07:33:33.141474 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s97vp\" (UniqueName: \"kubernetes.io/projected/18c31ba0-cf68-45bf-87da-d04ab3bd8b21-kube-api-access-s97vp\") pod \"kube-state-metrics-0\" (UID: \"18c31ba0-cf68-45bf-87da-d04ab3bd8b21\") " pod="openstack/kube-state-metrics-0" Nov 22 07:33:33 crc kubenswrapper[4929]: I1122 07:33:33.158098 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s97vp\" (UniqueName: \"kubernetes.io/projected/18c31ba0-cf68-45bf-87da-d04ab3bd8b21-kube-api-access-s97vp\") pod \"kube-state-metrics-0\" (UID: \"18c31ba0-cf68-45bf-87da-d04ab3bd8b21\") " pod="openstack/kube-state-metrics-0" Nov 22 07:33:33 crc kubenswrapper[4929]: I1122 07:33:33.320554 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.292076 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.294513 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.297310 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.297490 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.297625 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.297805 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.298484 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-r6m55" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.304253 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.318153 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.358965 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/fad488a3-81d2-42c9-9140-9ca981927e13-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.359059 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.359122 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/fad488a3-81d2-42c9-9140-9ca981927e13-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.359148 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/fad488a3-81d2-42c9-9140-9ca981927e13-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.359192 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/fad488a3-81d2-42c9-9140-9ca981927e13-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.359222 4929 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8w4x\" (UniqueName: \"kubernetes.io/projected/fad488a3-81d2-42c9-9140-9ca981927e13-kube-api-access-f8w4x\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.359357 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/fad488a3-81d2-42c9-9140-9ca981927e13-config\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.359419 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/fad488a3-81d2-42c9-9140-9ca981927e13-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.460831 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/fad488a3-81d2-42c9-9140-9ca981927e13-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.460886 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/fad488a3-81d2-42c9-9140-9ca981927e13-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.460926 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.460967 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/fad488a3-81d2-42c9-9140-9ca981927e13-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.460987 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/fad488a3-81d2-42c9-9140-9ca981927e13-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.461030 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/fad488a3-81d2-42c9-9140-9ca981927e13-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0" 
Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.461046 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8w4x\" (UniqueName: \"kubernetes.io/projected/fad488a3-81d2-42c9-9140-9ca981927e13-kube-api-access-f8w4x\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0"
Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.461088 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/fad488a3-81d2-42c9-9140-9ca981927e13-config\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0"
Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.462107 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/fad488a3-81d2-42c9-9140-9ca981927e13-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0"
Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.465927 4929 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.465964 4929 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/26190b449a803e3db988a49e35880969bbb223ecf15515b6482b10e2b9da6530/globalmount\"" pod="openstack/prometheus-metric-storage-0"
Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.466087 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/fad488a3-81d2-42c9-9140-9ca981927e13-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0"
Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.466656 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/fad488a3-81d2-42c9-9140-9ca981927e13-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0"
Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.468331 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/fad488a3-81d2-42c9-9140-9ca981927e13-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0"
Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.468740 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/fad488a3-81d2-42c9-9140-9ca981927e13-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0"
Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.471037 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/fad488a3-81d2-42c9-9140-9ca981927e13-config\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0"
Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.482633 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8w4x\" (UniqueName: \"kubernetes.io/projected/fad488a3-81d2-42c9-9140-9ca981927e13-kube-api-access-f8w4x\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0"
Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.500474 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\") pod \"prometheus-metric-storage-0\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " pod="openstack/prometheus-metric-storage-0"
Nov 22 07:33:34 crc kubenswrapper[4929]: I1122 07:33:34.615379 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.566476 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-6p98g"]
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.569017 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-6p98g"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.571815 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-gglj6"]
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.573404 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-gglj6"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.577047 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.577305 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.577501 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-8bdkw"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.584034 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-6p98g"]
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.597457 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-gglj6"]
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.603721 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a17264d7-e93b-4448-9ed8-0be507a9120f-var-run-ovn\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.603763 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/a17264d7-e93b-4448-9ed8-0be507a9120f-ovn-controller-tls-certs\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.603788 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a17264d7-e93b-4448-9ed8-0be507a9120f-combined-ca-bundle\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.603837 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a17264d7-e93b-4448-9ed8-0be507a9120f-var-run\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.603890 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a17264d7-e93b-4448-9ed8-0be507a9120f-scripts\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.603937 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fgcw\" (UniqueName: \"kubernetes.io/projected/a17264d7-e93b-4448-9ed8-0be507a9120f-kube-api-access-5fgcw\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.603954 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a17264d7-e93b-4448-9ed8-0be507a9120f-var-log-ovn\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.705494 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8-var-log\") pod \"ovn-controller-ovs-gglj6\" (UID: \"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8\") " pod="openstack/ovn-controller-ovs-gglj6"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.705591 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7p6p\" (UniqueName: \"kubernetes.io/projected/001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8-kube-api-access-c7p6p\") pod \"ovn-controller-ovs-gglj6\" (UID: \"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8\") " pod="openstack/ovn-controller-ovs-gglj6"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.705630 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a17264d7-e93b-4448-9ed8-0be507a9120f-var-run\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.705680 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8-scripts\") pod \"ovn-controller-ovs-gglj6\" (UID: \"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8\") " pod="openstack/ovn-controller-ovs-gglj6"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.705713 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a17264d7-e93b-4448-9ed8-0be507a9120f-var-log-ovn\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.705729 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fgcw\" (UniqueName: \"kubernetes.io/projected/a17264d7-e93b-4448-9ed8-0be507a9120f-kube-api-access-5fgcw\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.705755 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8-etc-ovs\") pod \"ovn-controller-ovs-gglj6\" (UID: \"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8\") " pod="openstack/ovn-controller-ovs-gglj6"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.705776 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a17264d7-e93b-4448-9ed8-0be507a9120f-var-run-ovn\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.705797 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/a17264d7-e93b-4448-9ed8-0be507a9120f-ovn-controller-tls-certs\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g"
Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.705853 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a17264d7-e93b-4448-9ed8-0be507a9120f-combined-ca-bundle\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.705906 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8-var-run\") pod \"ovn-controller-ovs-gglj6\" (UID: \"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8\") " pod="openstack/ovn-controller-ovs-gglj6" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.705932 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8-var-lib\") pod \"ovn-controller-ovs-gglj6\" (UID: \"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8\") " pod="openstack/ovn-controller-ovs-gglj6" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.705967 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a17264d7-e93b-4448-9ed8-0be507a9120f-scripts\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.706266 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/a17264d7-e93b-4448-9ed8-0be507a9120f-var-log-ovn\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.706353 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/a17264d7-e93b-4448-9ed8-0be507a9120f-var-run-ovn\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.706521 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/a17264d7-e93b-4448-9ed8-0be507a9120f-var-run\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.708603 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a17264d7-e93b-4448-9ed8-0be507a9120f-scripts\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.712922 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/a17264d7-e93b-4448-9ed8-0be507a9120f-ovn-controller-tls-certs\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.714028 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a17264d7-e93b-4448-9ed8-0be507a9120f-combined-ca-bundle\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.722289 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fgcw\" (UniqueName: \"kubernetes.io/projected/a17264d7-e93b-4448-9ed8-0be507a9120f-kube-api-access-5fgcw\") pod \"ovn-controller-6p98g\" (UID: \"a17264d7-e93b-4448-9ed8-0be507a9120f\") " pod="openstack/ovn-controller-6p98g" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.807905 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7p6p\" (UniqueName: \"kubernetes.io/projected/001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8-kube-api-access-c7p6p\") pod \"ovn-controller-ovs-gglj6\" (UID: \"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8\") " pod="openstack/ovn-controller-ovs-gglj6" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.807985 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8-scripts\") pod \"ovn-controller-ovs-gglj6\" (UID: \"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8\") " pod="openstack/ovn-controller-ovs-gglj6" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.808024 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8-etc-ovs\") pod \"ovn-controller-ovs-gglj6\" (UID: \"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8\") " pod="openstack/ovn-controller-ovs-gglj6" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.808058 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8-var-run\") pod \"ovn-controller-ovs-gglj6\" (UID: \"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8\") " pod="openstack/ovn-controller-ovs-gglj6" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.808079 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8-var-lib\") pod \"ovn-controller-ovs-gglj6\" (UID: \"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8\") " pod="openstack/ovn-controller-ovs-gglj6" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.808111 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8-var-log\") pod \"ovn-controller-ovs-gglj6\" (UID: \"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8\") " pod="openstack/ovn-controller-ovs-gglj6" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.808501 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8-var-log\") pod \"ovn-controller-ovs-gglj6\" (UID: \"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8\") " pod="openstack/ovn-controller-ovs-gglj6" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.808565 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8-var-run\") pod \"ovn-controller-ovs-gglj6\" (UID: \"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8\") " pod="openstack/ovn-controller-ovs-gglj6" Nov 22 07:33:36 crc 
kubenswrapper[4929]: I1122 07:33:36.808681 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8-var-lib\") pod \"ovn-controller-ovs-gglj6\" (UID: \"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8\") " pod="openstack/ovn-controller-ovs-gglj6" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.811109 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8-scripts\") pod \"ovn-controller-ovs-gglj6\" (UID: \"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8\") " pod="openstack/ovn-controller-ovs-gglj6" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.826614 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7p6p\" (UniqueName: \"kubernetes.io/projected/001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8-kube-api-access-c7p6p\") pod \"ovn-controller-ovs-gglj6\" (UID: \"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8\") " pod="openstack/ovn-controller-ovs-gglj6" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.900581 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-6p98g" Nov 22 07:33:36 crc kubenswrapper[4929]: I1122 07:33:36.998184 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8-etc-ovs\") pod \"ovn-controller-ovs-gglj6\" (UID: \"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8\") " pod="openstack/ovn-controller-ovs-gglj6" Nov 22 07:33:37 crc kubenswrapper[4929]: I1122 07:33:37.209088 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-gglj6" Nov 22 07:33:38 crc kubenswrapper[4929]: I1122 07:33:38.871526 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 22 07:33:38 crc kubenswrapper[4929]: I1122 07:33:38.873173 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:38 crc kubenswrapper[4929]: I1122 07:33:38.878234 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 22 07:33:38 crc kubenswrapper[4929]: I1122 07:33:38.878371 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-8jjtt" Nov 22 07:33:38 crc kubenswrapper[4929]: I1122 07:33:38.878234 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 22 07:33:38 crc kubenswrapper[4929]: I1122 07:33:38.878549 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 22 07:33:38 crc kubenswrapper[4929]: I1122 07:33:38.878619 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 22 07:33:38 crc kubenswrapper[4929]: I1122 07:33:38.887122 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 22 07:33:38 crc kubenswrapper[4929]: I1122 07:33:38.945728 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:38 crc kubenswrapper[4929]: I1122 07:33:38.945806 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:38 crc kubenswrapper[4929]: I1122 07:33:38.945833 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:38 crc kubenswrapper[4929]: I1122 07:33:38.945853 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbsc4\" (UniqueName: \"kubernetes.io/projected/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-kube-api-access-kbsc4\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:38 crc kubenswrapper[4929]: I1122 07:33:38.945885 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-config\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:38 crc kubenswrapper[4929]: I1122 07:33:38.945913 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:38 crc kubenswrapper[4929]: I1122 07:33:38.945942 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" 
(UniqueName: \"kubernetes.io/empty-dir/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:38 crc kubenswrapper[4929]: I1122 07:33:38.945962 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:39 crc kubenswrapper[4929]: I1122 07:33:39.047314 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:39 crc kubenswrapper[4929]: I1122 07:33:39.047630 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:39 crc kubenswrapper[4929]: I1122 07:33:39.047744 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbsc4\" (UniqueName: \"kubernetes.io/projected/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-kube-api-access-kbsc4\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:39 crc kubenswrapper[4929]: I1122 07:33:39.047874 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-config\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:39 crc kubenswrapper[4929]: I1122 07:33:39.047976 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:39 crc kubenswrapper[4929]: I1122 07:33:39.048116 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:39 crc kubenswrapper[4929]: I1122 07:33:39.048231 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:39 crc kubenswrapper[4929]: I1122 07:33:39.048557 4929 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:39 crc kubenswrapper[4929]: I1122 
07:33:39.048699 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:39 crc kubenswrapper[4929]: I1122 07:33:39.048778 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-config\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:39 crc kubenswrapper[4929]: I1122 07:33:39.049001 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:39 crc kubenswrapper[4929]: I1122 07:33:39.049427 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:39 crc kubenswrapper[4929]: I1122 07:33:39.054853 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:39 crc kubenswrapper[4929]: I1122 07:33:39.056743 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:39 crc kubenswrapper[4929]: I1122 07:33:39.058131 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:39 crc kubenswrapper[4929]: I1122 07:33:39.069766 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbsc4\" (UniqueName: \"kubernetes.io/projected/4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d-kube-api-access-kbsc4\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:39 crc kubenswrapper[4929]: I1122 07:33:39.079145 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d\") " pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:39 crc kubenswrapper[4929]: I1122 07:33:39.240323 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.708895 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.710424 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.711824 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-p6mp5" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.712171 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.712437 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.713530 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.728351 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.876949 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.877007 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-config\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.877027 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.877727 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7rzk7\" (UniqueName: \"kubernetes.io/projected/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-kube-api-access-7rzk7\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.877758 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.877788 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 
crc kubenswrapper[4929]: I1122 07:33:40.877806 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.877836 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.979346 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7rzk7\" (UniqueName: \"kubernetes.io/projected/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-kube-api-access-7rzk7\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.979622 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.979729 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.979772 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.979841 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.979910 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.979980 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-config\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.980005 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-ovsdbserver-nb-tls-certs\") pod 
\"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.980271 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.980288 4929 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.980920 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.981205 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-config\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.985505 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.986152 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:40 crc kubenswrapper[4929]: I1122 07:33:40.989117 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:41 crc kubenswrapper[4929]: I1122 07:33:41.000834 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7rzk7\" (UniqueName: \"kubernetes.io/projected/2886f6ad-e9ae-48fa-b9eb-9688a1022f29-kube-api-access-7rzk7\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:41 crc kubenswrapper[4929]: I1122 07:33:41.023422 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-nb-0\" (UID: \"2886f6ad-e9ae-48fa-b9eb-9688a1022f29\") " pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:41 crc kubenswrapper[4929]: I1122 07:33:41.037795 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 22 07:33:43 crc kubenswrapper[4929]: E1122 07:33:43.473765 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = writing blob: storing blob to file \"/var/tmp/container_images_storage1845207334/5\": happened during read: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 22 07:33:43 crc kubenswrapper[4929]: E1122 07:33:43.474202 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-z7mwv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-cx86p_openstack(a8e87ff1-3774-401d-ae24-728b40319f22): ErrImagePull: rpc error: code = Canceled desc = writing blob: storing blob to file \"/var/tmp/container_images_storage1845207334/5\": happened during read: context canceled" logger="UnhandledError" Nov 22 07:33:43 crc kubenswrapper[4929]: E1122 07:33:43.475401 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = writing blob: storing blob to file \\\"/var/tmp/container_images_storage1845207334/5\\\": happened during read: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-cx86p" podUID="a8e87ff1-3774-401d-ae24-728b40319f22" Nov 22 07:33:46 crc kubenswrapper[4929]: I1122 07:33:46.327812 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-cx86p" Nov 22 07:33:46 crc kubenswrapper[4929]: I1122 07:33:46.476427 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8e87ff1-3774-401d-ae24-728b40319f22-config\") pod \"a8e87ff1-3774-401d-ae24-728b40319f22\" (UID: \"a8e87ff1-3774-401d-ae24-728b40319f22\") " Nov 22 07:33:46 crc kubenswrapper[4929]: I1122 07:33:46.476505 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7mwv\" (UniqueName: \"kubernetes.io/projected/a8e87ff1-3774-401d-ae24-728b40319f22-kube-api-access-z7mwv\") pod \"a8e87ff1-3774-401d-ae24-728b40319f22\" (UID: \"a8e87ff1-3774-401d-ae24-728b40319f22\") " Nov 22 07:33:46 crc kubenswrapper[4929]: I1122 07:33:46.477387 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8e87ff1-3774-401d-ae24-728b40319f22-config" (OuterVolumeSpecName: "config") pod "a8e87ff1-3774-401d-ae24-728b40319f22" (UID: "a8e87ff1-3774-401d-ae24-728b40319f22"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:33:46 crc kubenswrapper[4929]: I1122 07:33:46.479673 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8e87ff1-3774-401d-ae24-728b40319f22-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:33:46 crc kubenswrapper[4929]: I1122 07:33:46.484022 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8e87ff1-3774-401d-ae24-728b40319f22-kube-api-access-z7mwv" (OuterVolumeSpecName: "kube-api-access-z7mwv") pod "a8e87ff1-3774-401d-ae24-728b40319f22" (UID: "a8e87ff1-3774-401d-ae24-728b40319f22"). InnerVolumeSpecName "kube-api-access-z7mwv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:33:46 crc kubenswrapper[4929]: I1122 07:33:46.581725 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7mwv\" (UniqueName: \"kubernetes.io/projected/a8e87ff1-3774-401d-ae24-728b40319f22-kube-api-access-z7mwv\") on node \"crc\" DevicePath \"\"" Nov 22 07:33:46 crc kubenswrapper[4929]: I1122 07:33:46.665116 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 22 07:33:46 crc kubenswrapper[4929]: I1122 07:33:46.998672 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-cx86p" event={"ID":"a8e87ff1-3774-401d-ae24-728b40319f22","Type":"ContainerDied","Data":"985d8f5c1d37142789b073ee36af8cc151ca4a182367d4dffb2c171829614694"} Nov 22 07:33:46 crc kubenswrapper[4929]: I1122 07:33:46.998756 4929 util.go:48] "No ready sandbox for pod can be found. 
Nov 22 07:33:46 crc kubenswrapper[4929]: I1122 07:33:46.998756 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-cx86p"
Nov 22 07:33:47 crc kubenswrapper[4929]: I1122 07:33:47.063813 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-cx86p"]
Nov 22 07:33:47 crc kubenswrapper[4929]: I1122 07:33:47.071186 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-cx86p"]
Nov 22 07:33:47 crc kubenswrapper[4929]: I1122 07:33:47.978034 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8e87ff1-3774-401d-ae24-728b40319f22" path="/var/lib/kubelet/pods/a8e87ff1-3774-401d-ae24-728b40319f22/volumes"
Nov 22 07:33:53 crc kubenswrapper[4929]: I1122 07:33:53.064123 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"619c7624-23e8-4578-8361-b9c91a56e5c9","Type":"ContainerStarted","Data":"1ebfebd255981913c8bca93e97a5a92fb31609d1c42bc9ea861fe2eff2f12cbd"}
Nov 22 07:33:53 crc kubenswrapper[4929]: I1122 07:33:53.343832 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 22 07:33:53 crc kubenswrapper[4929]: I1122 07:33:53.393256 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 22 07:33:53 crc kubenswrapper[4929]: I1122 07:33:53.478289 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 22 07:33:54 crc kubenswrapper[4929]: W1122 07:33:54.934134 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6a23e882_732c_416e_bfc5_c91517389f64.slice/crio-c808a0d8f6a5fbcd758bd5253caf15c1afbe53124caf1501bc1d21aa2195356c WatchSource:0}: Error finding container c808a0d8f6a5fbcd758bd5253caf15c1afbe53124caf1501bc1d21aa2195356c: Status 404 returned error can't find the container with id c808a0d8f6a5fbcd758bd5253caf15c1afbe53124caf1501bc1d21aa2195356c
Nov 22 07:33:55 crc kubenswrapper[4929]: I1122 07:33:55.117705 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"76073571-729d-4de5-bda6-780d28ae6a9b","Type":"ContainerStarted","Data":"b0de0fc1f3de212e8b88beb40289ca54df3e9548e84c4d4b9aa6fe611ca20067"}
Nov 22 07:33:55 crc kubenswrapper[4929]: I1122 07:33:55.122912 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"6a23e882-732c-416e-bfc5-c91517389f64","Type":"ContainerStarted","Data":"c808a0d8f6a5fbcd758bd5253caf15c1afbe53124caf1501bc1d21aa2195356c"}
Nov 22 07:33:55 crc kubenswrapper[4929]: I1122 07:33:55.125345 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2","Type":"ContainerStarted","Data":"5f627dae69a588c5cb89f95c8d19d7eac0241090189f0e60a6269b0a4f1c847e"}
Nov 22 07:33:55 crc kubenswrapper[4929]: I1122 07:33:55.421359 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 22 07:33:55 crc kubenswrapper[4929]: I1122 07:33:55.520756 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 22 07:33:55 crc kubenswrapper[4929]: I1122 07:33:55.526535 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-6p98g"]
Nov 22 07:33:55 crc kubenswrapper[4929]: I1122 07:33:55.533239 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Nov 22 07:33:55 crc kubenswrapper[4929]: W1122 07:33:55.536437 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfad488a3_81d2_42c9_9140_9ca981927e13.slice/crio-b5e914878d646a386cf3afe950d9573e0470e916e2e8f13a7dc9fbfb0933d6b1 WatchSource:0}: Error finding container b5e914878d646a386cf3afe950d9573e0470e916e2e8f13a7dc9fbfb0933d6b1: Status 404 returned error can't find the container with id b5e914878d646a386cf3afe950d9573e0470e916e2e8f13a7dc9fbfb0933d6b1
Nov 22 07:33:55 crc kubenswrapper[4929]: I1122 07:33:55.676251 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 22 07:33:55 crc kubenswrapper[4929]: I1122 07:33:55.834132 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 22 07:33:56 crc kubenswrapper[4929]: I1122 07:33:56.142343 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"18c31ba0-cf68-45bf-87da-d04ab3bd8b21","Type":"ContainerStarted","Data":"a8a26f7fcf573a6fc6367957223b8ecfcd4caf2c7823262e3443ec785e6c03e3"}
Nov 22 07:33:56 crc kubenswrapper[4929]: I1122 07:33:56.143407 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d","Type":"ContainerStarted","Data":"b34bea1c7e70622059ce97c1493142ca9835db1fd2d0c9f909a751e9e2c68c77"}
Nov 22 07:33:56 crc kubenswrapper[4929]: I1122 07:33:56.144598 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6p98g" event={"ID":"a17264d7-e93b-4448-9ed8-0be507a9120f","Type":"ContainerStarted","Data":"7fe8f8fa8843780d9b4f77c31c5a7e9cba0a06a834c1ad394ea4a92450bf4198"}
Nov 22 07:33:56 crc kubenswrapper[4929]: I1122 07:33:56.146308 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"941ef061-1085-45ca-84e2-60447bb10c47","Type":"ContainerStarted","Data":"b092c12c55bd45a2c104db682aec9ee7e5377b5682dca209e44832d76fdd2206"}
Nov 22 07:33:56 crc kubenswrapper[4929]: I1122 07:33:56.148239 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"2886f6ad-e9ae-48fa-b9eb-9688a1022f29","Type":"ContainerStarted","Data":"4d37de53b1ef823de80d419c1641a8cd0440709170eeaa8c87972a85fcbd0b3d"}
Nov 22 07:33:56 crc kubenswrapper[4929]: I1122 07:33:56.150097 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fad488a3-81d2-42c9-9140-9ca981927e13","Type":"ContainerStarted","Data":"b5e914878d646a386cf3afe950d9573e0470e916e2e8f13a7dc9fbfb0933d6b1"}
Nov 22 07:33:56 crc kubenswrapper[4929]: I1122 07:33:56.738881 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-gglj6"]
Nov 22 07:33:56 crc kubenswrapper[4929]: E1122 07:33:56.874720 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified"
Nov 22 07:33:56 crc kubenswrapper[4929]: E1122 07:33:56.874896 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5ddsr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-wlblj_openstack(57539eb2-2c3a-4aa4-bcbd-838621769f35): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 22 07:33:56 crc kubenswrapper[4929]: E1122 07:33:56.876143 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-wlblj" podUID="57539eb2-2c3a-4aa4-bcbd-838621769f35"
Nov 22 07:33:57 crc kubenswrapper[4929]: W1122 07:33:57.002552 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod001fa5a3_fac0_4b3f_99dc_bd910c8e2bb8.slice/crio-7a9a5d7dc617327fc3a24e625b4ff1f043518dc6b5e3d5434198cf6cc62e5903 WatchSource:0}: Error finding container 7a9a5d7dc617327fc3a24e625b4ff1f043518dc6b5e3d5434198cf6cc62e5903: Status 404 returned error can't find the container with id 7a9a5d7dc617327fc3a24e625b4ff1f043518dc6b5e3d5434198cf6cc62e5903
Nov 22 07:33:57 crc kubenswrapper[4929]: I1122 07:33:57.160560 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-gglj6" event={"ID":"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8","Type":"ContainerStarted","Data":"7a9a5d7dc617327fc3a24e625b4ff1f043518dc6b5e3d5434198cf6cc62e5903"}
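The Args in the &Container dumps above run dnsmasq with --test, so the init container only parses the mounted configuration and exits rather than serving DNS. A standalone sketch of the same check, assuming only that dnsmasq is installed locally; the /tmp paths and the 127.0.0.1 listen address are stand-ins for the pod's ConfigMap mounts and $(POD_IP):

  mkdir -p /tmp/dnsmasq.d/hosts
  printf 'server=/example.test/192.0.2.1\n' > /tmp/dnsmasq.d/config.cfg   # stand-in for the ConfigMap's "dns" key
  dnsmasq --interface='*' --conf-dir=/tmp/dnsmasq.d --hostsdir=/tmp/dnsmasq.d/hosts \
      --keep-in-foreground --log-debug --bind-interfaces --listen-address=127.0.0.1 \
      --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv \
      --bogus-priv --log-queries --test
  # With --test, dnsmasq prints "syntax check OK." and exits 0 on a valid
  # configuration, which is all this init container needs to succeed.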
Nov 22 07:33:57 crc kubenswrapper[4929]: I1122 07:33:57.426847 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-wlblj"
Nov 22 07:33:57 crc kubenswrapper[4929]: I1122 07:33:57.572607 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57539eb2-2c3a-4aa4-bcbd-838621769f35-config\") pod \"57539eb2-2c3a-4aa4-bcbd-838621769f35\" (UID: \"57539eb2-2c3a-4aa4-bcbd-838621769f35\") "
Nov 22 07:33:57 crc kubenswrapper[4929]: I1122 07:33:57.572689 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/57539eb2-2c3a-4aa4-bcbd-838621769f35-dns-svc\") pod \"57539eb2-2c3a-4aa4-bcbd-838621769f35\" (UID: \"57539eb2-2c3a-4aa4-bcbd-838621769f35\") "
Nov 22 07:33:57 crc kubenswrapper[4929]: I1122 07:33:57.572719 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ddsr\" (UniqueName: \"kubernetes.io/projected/57539eb2-2c3a-4aa4-bcbd-838621769f35-kube-api-access-5ddsr\") pod \"57539eb2-2c3a-4aa4-bcbd-838621769f35\" (UID: \"57539eb2-2c3a-4aa4-bcbd-838621769f35\") "
Nov 22 07:33:57 crc kubenswrapper[4929]: I1122 07:33:57.573178 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/57539eb2-2c3a-4aa4-bcbd-838621769f35-config" (OuterVolumeSpecName: "config") pod "57539eb2-2c3a-4aa4-bcbd-838621769f35" (UID: "57539eb2-2c3a-4aa4-bcbd-838621769f35"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:33:57 crc kubenswrapper[4929]: I1122 07:33:57.573265 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/57539eb2-2c3a-4aa4-bcbd-838621769f35-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "57539eb2-2c3a-4aa4-bcbd-838621769f35" (UID: "57539eb2-2c3a-4aa4-bcbd-838621769f35"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:33:57 crc kubenswrapper[4929]: I1122 07:33:57.576555 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57539eb2-2c3a-4aa4-bcbd-838621769f35-kube-api-access-5ddsr" (OuterVolumeSpecName: "kube-api-access-5ddsr") pod "57539eb2-2c3a-4aa4-bcbd-838621769f35" (UID: "57539eb2-2c3a-4aa4-bcbd-838621769f35"). InnerVolumeSpecName "kube-api-access-5ddsr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:33:57 crc kubenswrapper[4929]: I1122 07:33:57.674938 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57539eb2-2c3a-4aa4-bcbd-838621769f35-config\") on node \"crc\" DevicePath \"\""
Nov 22 07:33:57 crc kubenswrapper[4929]: I1122 07:33:57.674971 4929 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/57539eb2-2c3a-4aa4-bcbd-838621769f35-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 22 07:33:57 crc kubenswrapper[4929]: I1122 07:33:57.674981 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ddsr\" (UniqueName: \"kubernetes.io/projected/57539eb2-2c3a-4aa4-bcbd-838621769f35-kube-api-access-5ddsr\") on node \"crc\" DevicePath \"\""
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.169799 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-wlblj" event={"ID":"57539eb2-2c3a-4aa4-bcbd-838621769f35","Type":"ContainerDied","Data":"e09de2e2e368ce67ca34af08c5326c99c3f6566a62b3ace509e3e9bc7a3e1608"}
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.169885 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-wlblj"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.172468 4929 generic.go:334] "Generic (PLEG): container finished" podID="b99c6a04-d311-41c0-9b1e-675bcbb75aeb" containerID="76ebaae925f0b81d7b00cd5a05c44811300cc800023df70cd275c5ce00d5b8c2" exitCode=0
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.172562 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" event={"ID":"b99c6a04-d311-41c0-9b1e-675bcbb75aeb","Type":"ContainerDied","Data":"76ebaae925f0b81d7b00cd5a05c44811300cc800023df70cd275c5ce00d5b8c2"}
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.252384 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-wlblj"]
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.255315 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-wlblj"]
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.675790 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-p8r4f"]
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.677432 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.679431 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.687442 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-p8r4f"]
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.711358 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vh9h\" (UniqueName: \"kubernetes.io/projected/06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380-kube-api-access-7vh9h\") pod \"ovn-controller-metrics-p8r4f\" (UID: \"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380\") " pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.711436 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380-ovn-rundir\") pod \"ovn-controller-metrics-p8r4f\" (UID: \"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380\") " pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.711524 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380-ovs-rundir\") pod \"ovn-controller-metrics-p8r4f\" (UID: \"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380\") " pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.711554 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-p8r4f\" (UID: \"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380\") " pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.711580 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380-combined-ca-bundle\") pod \"ovn-controller-metrics-p8r4f\" (UID: \"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380\") " pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.711621 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380-config\") pod \"ovn-controller-metrics-p8r4f\" (UID: \"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380\") " pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.816786 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380-ovs-rundir\") pod \"ovn-controller-metrics-p8r4f\" (UID: \"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380\") " pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.816865 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-p8r4f\" (UID: \"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380\") " pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.816904 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380-combined-ca-bundle\") pod \"ovn-controller-metrics-p8r4f\" (UID: \"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380\") " pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.816999 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380-config\") pod \"ovn-controller-metrics-p8r4f\" (UID: \"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380\") " pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.817066 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vh9h\" (UniqueName: \"kubernetes.io/projected/06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380-kube-api-access-7vh9h\") pod \"ovn-controller-metrics-p8r4f\" (UID: \"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380\") " pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.817089 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380-ovn-rundir\") pod \"ovn-controller-metrics-p8r4f\" (UID: \"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380\") " pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.818043 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380-ovn-rundir\") pod \"ovn-controller-metrics-p8r4f\" (UID: \"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380\") " pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.818120 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380-ovs-rundir\") pod \"ovn-controller-metrics-p8r4f\" (UID: \"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380\") " pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.819984 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380-config\") pod \"ovn-controller-metrics-p8r4f\" (UID: \"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380\") " pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.821274 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-pdvz8"]
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.825102 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380-combined-ca-bundle\") pod \"ovn-controller-metrics-p8r4f\" (UID: \"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380\") " pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.827946 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-p8r4f\" (UID: \"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380\") " pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.845448 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vh9h\" (UniqueName: \"kubernetes.io/projected/06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380-kube-api-access-7vh9h\") pod \"ovn-controller-metrics-p8r4f\" (UID: \"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380\") " pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.847372 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-4p6db"]
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.849440 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-4p6db"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.855153 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.864571 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-4p6db"]
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.918772 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43955a6b-f4ff-4222-8a61-2467e5c2b515-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-4p6db\" (UID: \"43955a6b-f4ff-4222-8a61-2467e5c2b515\") " pod="openstack/dnsmasq-dns-7f896c8c65-4p6db"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.918841 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5h8g9\" (UniqueName: \"kubernetes.io/projected/43955a6b-f4ff-4222-8a61-2467e5c2b515-kube-api-access-5h8g9\") pod \"dnsmasq-dns-7f896c8c65-4p6db\" (UID: \"43955a6b-f4ff-4222-8a61-2467e5c2b515\") " pod="openstack/dnsmasq-dns-7f896c8c65-4p6db"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.918922 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43955a6b-f4ff-4222-8a61-2467e5c2b515-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-4p6db\" (UID: \"43955a6b-f4ff-4222-8a61-2467e5c2b515\") " pod="openstack/dnsmasq-dns-7f896c8c65-4p6db"
Nov 22 07:33:58 crc kubenswrapper[4929]: I1122 07:33:58.918983 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43955a6b-f4ff-4222-8a61-2467e5c2b515-config\") pod \"dnsmasq-dns-7f896c8c65-4p6db\" (UID: \"43955a6b-f4ff-4222-8a61-2467e5c2b515\") " pod="openstack/dnsmasq-dns-7f896c8c65-4p6db"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.014564 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-p8r4f"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.020614 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43955a6b-f4ff-4222-8a61-2467e5c2b515-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-4p6db\" (UID: \"43955a6b-f4ff-4222-8a61-2467e5c2b515\") " pod="openstack/dnsmasq-dns-7f896c8c65-4p6db"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.020711 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43955a6b-f4ff-4222-8a61-2467e5c2b515-config\") pod \"dnsmasq-dns-7f896c8c65-4p6db\" (UID: \"43955a6b-f4ff-4222-8a61-2467e5c2b515\") " pod="openstack/dnsmasq-dns-7f896c8c65-4p6db"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.020801 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43955a6b-f4ff-4222-8a61-2467e5c2b515-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-4p6db\" (UID: \"43955a6b-f4ff-4222-8a61-2467e5c2b515\") " pod="openstack/dnsmasq-dns-7f896c8c65-4p6db"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.020826 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5h8g9\" (UniqueName: \"kubernetes.io/projected/43955a6b-f4ff-4222-8a61-2467e5c2b515-kube-api-access-5h8g9\") pod \"dnsmasq-dns-7f896c8c65-4p6db\" (UID: \"43955a6b-f4ff-4222-8a61-2467e5c2b515\") " pod="openstack/dnsmasq-dns-7f896c8c65-4p6db"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.021968 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-ddtc6"]
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.022375 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43955a6b-f4ff-4222-8a61-2467e5c2b515-config\") pod \"dnsmasq-dns-7f896c8c65-4p6db\" (UID: \"43955a6b-f4ff-4222-8a61-2467e5c2b515\") " pod="openstack/dnsmasq-dns-7f896c8c65-4p6db"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.022868 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43955a6b-f4ff-4222-8a61-2467e5c2b515-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-4p6db\" (UID: \"43955a6b-f4ff-4222-8a61-2467e5c2b515\") " pod="openstack/dnsmasq-dns-7f896c8c65-4p6db"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.023114 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43955a6b-f4ff-4222-8a61-2467e5c2b515-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-4p6db\" (UID: \"43955a6b-f4ff-4222-8a61-2467e5c2b515\") " pod="openstack/dnsmasq-dns-7f896c8c65-4p6db"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.043648 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5h8g9\" (UniqueName: \"kubernetes.io/projected/43955a6b-f4ff-4222-8a61-2467e5c2b515-kube-api-access-5h8g9\") pod \"dnsmasq-dns-7f896c8c65-4p6db\" (UID: \"43955a6b-f4ff-4222-8a61-2467e5c2b515\") " pod="openstack/dnsmasq-dns-7f896c8c65-4p6db"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.069319 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-pk7mc"]
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.071067 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.080408 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.117872 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-pk7mc"]
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.122038 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-pk7mc\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.122134 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlvnl\" (UniqueName: \"kubernetes.io/projected/f4a5840d-58f8-4312-854c-b927be25fc3f-kube-api-access-hlvnl\") pod \"dnsmasq-dns-86db49b7ff-pk7mc\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.122187 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-pk7mc\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.122209 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-pk7mc\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.122279 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-config\") pod \"dnsmasq-dns-86db49b7ff-pk7mc\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.190687 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" event={"ID":"b99c6a04-d311-41c0-9b1e-675bcbb75aeb","Type":"ContainerStarted","Data":"a6d8d0c9e63947fad0aa9141bee7784227483d4e6a4dd56f80fa1d5deac7383f"}
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.191649 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-ddtc6"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.191947 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-4p6db"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.223575 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-pk7mc\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.223629 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlvnl\" (UniqueName: \"kubernetes.io/projected/f4a5840d-58f8-4312-854c-b927be25fc3f-kube-api-access-hlvnl\") pod \"dnsmasq-dns-86db49b7ff-pk7mc\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.223687 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-pk7mc\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.223708 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-pk7mc\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.223724 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-config\") pod \"dnsmasq-dns-86db49b7ff-pk7mc\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.224932 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-pk7mc\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.225469 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-pk7mc\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.225654 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-config\") pod \"dnsmasq-dns-86db49b7ff-pk7mc\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.226073 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-pk7mc\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.237540 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" podStartSLOduration=3.815418989 podStartE2EDuration="33.237512269s" podCreationTimestamp="2025-11-22 07:33:26 +0000 UTC" firstStartedPulling="2025-11-22 07:33:27.630628779 +0000 UTC m=+1344.740082792" lastFinishedPulling="2025-11-22 07:33:57.052722059 +0000 UTC m=+1374.162176072" observedRunningTime="2025-11-22 07:33:59.220756162 +0000 UTC m=+1376.330210175" watchObservedRunningTime="2025-11-22 07:33:59.237512269 +0000 UTC m=+1376.346966282"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.252965 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlvnl\" (UniqueName: \"kubernetes.io/projected/f4a5840d-58f8-4312-854c-b927be25fc3f-kube-api-access-hlvnl\") pod \"dnsmasq-dns-86db49b7ff-pk7mc\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.465536 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc"
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.612791 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-p8r4f"]
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.674820 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-4p6db"]
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.919317 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-pk7mc"]
Nov 22 07:33:59 crc kubenswrapper[4929]: W1122 07:33:59.929160 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf4a5840d_58f8_4312_854c_b927be25fc3f.slice/crio-14fc8f52a0213d43f55c77c0a9aea9ce21e5fbe59af7ee5be4a9dd30e18f48ac WatchSource:0}: Error finding container 14fc8f52a0213d43f55c77c0a9aea9ce21e5fbe59af7ee5be4a9dd30e18f48ac: Status 404 returned error can't find the container with id 14fc8f52a0213d43f55c77c0a9aea9ce21e5fbe59af7ee5be4a9dd30e18f48ac
Nov 22 07:33:59 crc kubenswrapper[4929]: I1122 07:33:59.958004 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57539eb2-2c3a-4aa4-bcbd-838621769f35" path="/var/lib/kubelet/pods/57539eb2-2c3a-4aa4-bcbd-838621769f35/volumes"
Nov 22 07:34:00 crc kubenswrapper[4929]: I1122 07:34:00.201942 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc" event={"ID":"f4a5840d-58f8-4312-854c-b927be25fc3f","Type":"ContainerStarted","Data":"14fc8f52a0213d43f55c77c0a9aea9ce21e5fbe59af7ee5be4a9dd30e18f48ac"}
Nov 22 07:34:00 crc kubenswrapper[4929]: I1122 07:34:00.203563 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-4p6db" event={"ID":"43955a6b-f4ff-4222-8a61-2467e5c2b515","Type":"ContainerStarted","Data":"58b9a0d44a2ca920537a18ec9a7760e15c98c34a227500bffbdfa211e24e1efb"}
Nov 22 07:34:00 crc kubenswrapper[4929]: I1122 07:34:00.203584 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-4p6db" event={"ID":"43955a6b-f4ff-4222-8a61-2467e5c2b515","Type":"ContainerStarted","Data":"9b972d4840dba9f5fbc5b80230ecf3c0e1dd477a74996c80faf4a913ebabd3d9"}
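The pod_startup_latency_tracker entry above encodes a simple relation: podStartSLOduration is the end-to-end startup time minus the time spent pulling images. Checking it with the monotonic (m=+) values copied from that entry, using bc purely as a calculator:

  # (watchObservedRunningTime - podCreationTimestamp) - (lastFinishedPulling - firstStartedPulling)
  echo '33.237512269 - (1374.162176072 - 1344.740082792)' | bc -l
  # prints 3.815418989, matching the reported podStartSLOduration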
containerID="90089f017dd370922f57b825bca9f3d364bff20792960672c06d44839af48859" exitCode=0 Nov 22 07:34:00 crc kubenswrapper[4929]: I1122 07:34:00.205395 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-pdvz8" event={"ID":"8e420a12-2fa0-41d7-97f0-1045b245bcea","Type":"ContainerDied","Data":"90089f017dd370922f57b825bca9f3d364bff20792960672c06d44839af48859"} Nov 22 07:34:00 crc kubenswrapper[4929]: I1122 07:34:00.206503 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-p8r4f" event={"ID":"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380","Type":"ContainerStarted","Data":"d6b8b5d698b476110ad44386bc81e12f33865b2d050393ced1dedda7c9d224d4"} Nov 22 07:34:00 crc kubenswrapper[4929]: I1122 07:34:00.206675 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" podUID="b99c6a04-d311-41c0-9b1e-675bcbb75aeb" containerName="dnsmasq-dns" containerID="cri-o://a6d8d0c9e63947fad0aa9141bee7784227483d4e6a4dd56f80fa1d5deac7383f" gracePeriod=10 Nov 22 07:34:00 crc kubenswrapper[4929]: I1122 07:34:00.563768 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-pdvz8" Nov 22 07:34:00 crc kubenswrapper[4929]: I1122 07:34:00.651423 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e420a12-2fa0-41d7-97f0-1045b245bcea-config\") pod \"8e420a12-2fa0-41d7-97f0-1045b245bcea\" (UID: \"8e420a12-2fa0-41d7-97f0-1045b245bcea\") " Nov 22 07:34:00 crc kubenswrapper[4929]: I1122 07:34:00.651625 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zhb8\" (UniqueName: \"kubernetes.io/projected/8e420a12-2fa0-41d7-97f0-1045b245bcea-kube-api-access-7zhb8\") pod \"8e420a12-2fa0-41d7-97f0-1045b245bcea\" (UID: \"8e420a12-2fa0-41d7-97f0-1045b245bcea\") " Nov 22 07:34:00 crc kubenswrapper[4929]: I1122 07:34:00.651713 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e420a12-2fa0-41d7-97f0-1045b245bcea-dns-svc\") pod \"8e420a12-2fa0-41d7-97f0-1045b245bcea\" (UID: \"8e420a12-2fa0-41d7-97f0-1045b245bcea\") " Nov 22 07:34:00 crc kubenswrapper[4929]: I1122 07:34:00.667005 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e420a12-2fa0-41d7-97f0-1045b245bcea-kube-api-access-7zhb8" (OuterVolumeSpecName: "kube-api-access-7zhb8") pod "8e420a12-2fa0-41d7-97f0-1045b245bcea" (UID: "8e420a12-2fa0-41d7-97f0-1045b245bcea"). InnerVolumeSpecName "kube-api-access-7zhb8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:34:00 crc kubenswrapper[4929]: I1122 07:34:00.672850 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e420a12-2fa0-41d7-97f0-1045b245bcea-config" (OuterVolumeSpecName: "config") pod "8e420a12-2fa0-41d7-97f0-1045b245bcea" (UID: "8e420a12-2fa0-41d7-97f0-1045b245bcea"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:34:00 crc kubenswrapper[4929]: I1122 07:34:00.678309 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e420a12-2fa0-41d7-97f0-1045b245bcea-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8e420a12-2fa0-41d7-97f0-1045b245bcea" (UID: "8e420a12-2fa0-41d7-97f0-1045b245bcea"). 
InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:34:00 crc kubenswrapper[4929]: I1122 07:34:00.759539 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zhb8\" (UniqueName: \"kubernetes.io/projected/8e420a12-2fa0-41d7-97f0-1045b245bcea-kube-api-access-7zhb8\") on node \"crc\" DevicePath \"\"" Nov 22 07:34:00 crc kubenswrapper[4929]: I1122 07:34:00.759599 4929 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8e420a12-2fa0-41d7-97f0-1045b245bcea-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 07:34:00 crc kubenswrapper[4929]: I1122 07:34:00.759613 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8e420a12-2fa0-41d7-97f0-1045b245bcea-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:34:01 crc kubenswrapper[4929]: I1122 07:34:01.218644 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-pdvz8" Nov 22 07:34:01 crc kubenswrapper[4929]: I1122 07:34:01.218742 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-pdvz8" event={"ID":"8e420a12-2fa0-41d7-97f0-1045b245bcea","Type":"ContainerDied","Data":"ba6a5581846253e32b8913ade3ce04b74f33732244532929fa1eb7367069e314"} Nov 22 07:34:01 crc kubenswrapper[4929]: I1122 07:34:01.218811 4929 scope.go:117] "RemoveContainer" containerID="90089f017dd370922f57b825bca9f3d364bff20792960672c06d44839af48859" Nov 22 07:34:01 crc kubenswrapper[4929]: I1122 07:34:01.278275 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc" event={"ID":"f4a5840d-58f8-4312-854c-b927be25fc3f","Type":"ContainerStarted","Data":"45fe7d2bb56386a921f77f472c5727cf1f9b0002470c0b94367283e159b4f98d"} Nov 22 07:34:01 crc kubenswrapper[4929]: I1122 07:34:01.286377 4929 generic.go:334] "Generic (PLEG): container finished" podID="b99c6a04-d311-41c0-9b1e-675bcbb75aeb" containerID="a6d8d0c9e63947fad0aa9141bee7784227483d4e6a4dd56f80fa1d5deac7383f" exitCode=0 Nov 22 07:34:01 crc kubenswrapper[4929]: I1122 07:34:01.286460 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" event={"ID":"b99c6a04-d311-41c0-9b1e-675bcbb75aeb","Type":"ContainerDied","Data":"a6d8d0c9e63947fad0aa9141bee7784227483d4e6a4dd56f80fa1d5deac7383f"} Nov 22 07:34:01 crc kubenswrapper[4929]: I1122 07:34:01.291114 4929 generic.go:334] "Generic (PLEG): container finished" podID="43955a6b-f4ff-4222-8a61-2467e5c2b515" containerID="58b9a0d44a2ca920537a18ec9a7760e15c98c34a227500bffbdfa211e24e1efb" exitCode=0 Nov 22 07:34:01 crc kubenswrapper[4929]: I1122 07:34:01.292255 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-4p6db" event={"ID":"43955a6b-f4ff-4222-8a61-2467e5c2b515","Type":"ContainerDied","Data":"58b9a0d44a2ca920537a18ec9a7760e15c98c34a227500bffbdfa211e24e1efb"} Nov 22 07:34:01 crc kubenswrapper[4929]: I1122 07:34:01.320525 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-pdvz8"] Nov 22 07:34:01 crc kubenswrapper[4929]: I1122 07:34:01.334431 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-pdvz8"] Nov 22 07:34:01 crc kubenswrapper[4929]: I1122 07:34:01.961123 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e420a12-2fa0-41d7-97f0-1045b245bcea" 
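The "Killing container with a grace period" entry above (gracePeriod=10) is the kubelet stopping dnsmasq-dns-666b6646f7-ddtc6 through the CRI: SIGTERM first, SIGKILL only if the container outlives the grace period; the matching "container finished ... exitCode=0" entry shows dnsmasq exited cleanly within the window. A hedged way to see where the 10 seconds comes from while such a pod still exists (it may be the pod spec's terminationGracePeriodSeconds or a grace period supplied on the delete request itself):

  oc -n openstack get pod dnsmasq-dns-666b6646f7-ddtc6 \
      -o jsonpath='{.spec.terminationGracePeriodSeconds}{"\n"}'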
path="/var/lib/kubelet/pods/8e420a12-2fa0-41d7-97f0-1045b245bcea/volumes" Nov 22 07:34:03 crc kubenswrapper[4929]: I1122 07:34:03.307229 4929 generic.go:334] "Generic (PLEG): container finished" podID="f4a5840d-58f8-4312-854c-b927be25fc3f" containerID="45fe7d2bb56386a921f77f472c5727cf1f9b0002470c0b94367283e159b4f98d" exitCode=0 Nov 22 07:34:03 crc kubenswrapper[4929]: I1122 07:34:03.307323 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc" event={"ID":"f4a5840d-58f8-4312-854c-b927be25fc3f","Type":"ContainerDied","Data":"45fe7d2bb56386a921f77f472c5727cf1f9b0002470c0b94367283e159b4f98d"} Nov 22 07:34:05 crc kubenswrapper[4929]: I1122 07:34:05.931501 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" Nov 22 07:34:06 crc kubenswrapper[4929]: I1122 07:34:06.041997 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5v7cm\" (UniqueName: \"kubernetes.io/projected/b99c6a04-d311-41c0-9b1e-675bcbb75aeb-kube-api-access-5v7cm\") pod \"b99c6a04-d311-41c0-9b1e-675bcbb75aeb\" (UID: \"b99c6a04-d311-41c0-9b1e-675bcbb75aeb\") " Nov 22 07:34:06 crc kubenswrapper[4929]: I1122 07:34:06.042332 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b99c6a04-d311-41c0-9b1e-675bcbb75aeb-dns-svc\") pod \"b99c6a04-d311-41c0-9b1e-675bcbb75aeb\" (UID: \"b99c6a04-d311-41c0-9b1e-675bcbb75aeb\") " Nov 22 07:34:06 crc kubenswrapper[4929]: I1122 07:34:06.042376 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b99c6a04-d311-41c0-9b1e-675bcbb75aeb-config\") pod \"b99c6a04-d311-41c0-9b1e-675bcbb75aeb\" (UID: \"b99c6a04-d311-41c0-9b1e-675bcbb75aeb\") " Nov 22 07:34:06 crc kubenswrapper[4929]: I1122 07:34:06.047662 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b99c6a04-d311-41c0-9b1e-675bcbb75aeb-kube-api-access-5v7cm" (OuterVolumeSpecName: "kube-api-access-5v7cm") pod "b99c6a04-d311-41c0-9b1e-675bcbb75aeb" (UID: "b99c6a04-d311-41c0-9b1e-675bcbb75aeb"). InnerVolumeSpecName "kube-api-access-5v7cm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:34:06 crc kubenswrapper[4929]: I1122 07:34:06.082635 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b99c6a04-d311-41c0-9b1e-675bcbb75aeb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b99c6a04-d311-41c0-9b1e-675bcbb75aeb" (UID: "b99c6a04-d311-41c0-9b1e-675bcbb75aeb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:34:06 crc kubenswrapper[4929]: I1122 07:34:06.098395 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b99c6a04-d311-41c0-9b1e-675bcbb75aeb-config" (OuterVolumeSpecName: "config") pod "b99c6a04-d311-41c0-9b1e-675bcbb75aeb" (UID: "b99c6a04-d311-41c0-9b1e-675bcbb75aeb"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:34:06 crc kubenswrapper[4929]: I1122 07:34:06.143807 4929 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b99c6a04-d311-41c0-9b1e-675bcbb75aeb-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 07:34:06 crc kubenswrapper[4929]: I1122 07:34:06.143837 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b99c6a04-d311-41c0-9b1e-675bcbb75aeb-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:34:06 crc kubenswrapper[4929]: I1122 07:34:06.143845 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5v7cm\" (UniqueName: \"kubernetes.io/projected/b99c6a04-d311-41c0-9b1e-675bcbb75aeb-kube-api-access-5v7cm\") on node \"crc\" DevicePath \"\"" Nov 22 07:34:06 crc kubenswrapper[4929]: I1122 07:34:06.337908 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" event={"ID":"b99c6a04-d311-41c0-9b1e-675bcbb75aeb","Type":"ContainerDied","Data":"addff9a46bdf74a40a7506137efdebb39a52faee5600b849d5bee4f1ca13c021"} Nov 22 07:34:06 crc kubenswrapper[4929]: I1122 07:34:06.337981 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-ddtc6" Nov 22 07:34:06 crc kubenswrapper[4929]: I1122 07:34:06.371967 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-ddtc6"] Nov 22 07:34:06 crc kubenswrapper[4929]: I1122 07:34:06.378094 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-ddtc6"] Nov 22 07:34:07 crc kubenswrapper[4929]: I1122 07:34:07.965013 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b99c6a04-d311-41c0-9b1e-675bcbb75aeb" path="/var/lib/kubelet/pods/b99c6a04-d311-41c0-9b1e-675bcbb75aeb/volumes" Nov 22 07:34:12 crc kubenswrapper[4929]: E1122 07:34:12.542466 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 22 07:34:12 crc kubenswrapper[4929]: E1122 07:34:12.543657 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7qzl8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(941ef061-1085-45ca-84e2-60447bb10c47): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:34:12 crc kubenswrapper[4929]: E1122 07:34:12.545004 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="941ef061-1085-45ca-84e2-60447bb10c47" Nov 22 07:34:14 crc kubenswrapper[4929]: E1122 07:34:14.229086 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="941ef061-1085-45ca-84e2-60447bb10c47" Nov 22 07:34:14 crc kubenswrapper[4929]: I1122 07:34:14.229184 4929 scope.go:117] "RemoveContainer" containerID="a6d8d0c9e63947fad0aa9141bee7784227483d4e6a4dd56f80fa1d5deac7383f" Nov 22 07:34:17 crc kubenswrapper[4929]: E1122 07:34:17.949175 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified" Nov 22 07:34:17 crc kubenswrapper[4929]: E1122 07:34:17.949929 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
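The setup-container Command/Args in the rabbitmq-cell1-server-0 dump above compress a whole provisioning script into one sh -c line. Reflowed here for readability (same logic as the log entry; the paths are the pod's volume mounts, so this only runs meaningfully inside that container):

  cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie \
      && chmod 600 /var/lib/rabbitmq/.erlang.cookie
  cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins
  echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf \
      && sed -e 's/default_user/username/' -e 's/default_pass/password/' \
          /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf \
      && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf
  sleep 30   # trailing sleep kept from the original command line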
&Container{Name:ovsdb-server-init,Image:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,Command:[/usr/local/bin/container-scripts/init-ovsdb-server.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n584h5c5h54dh66hdbh5b6hbbh697h667hc6h95h78h5f4h565hf7h56dh57dh586h67ch54dhd9hb4h679hdch558h59ch67hbh77h8fh59fh65bq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-ovs,ReadOnly:false,MountPath:/etc/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log,ReadOnly:false,MountPath:/var/log/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-lib,ReadOnly:false,MountPath:/var/lib/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-c7p6p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-ovs-gglj6_openstack(001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:34:17 crc kubenswrapper[4929]: E1122 07:34:17.951588 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdb-server-init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-ovs-gglj6" podUID="001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8" Nov 22 07:34:18 crc kubenswrapper[4929]: E1122 07:34:18.460189 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdb-server-init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\\\"\"" pod="openstack/ovn-controller-ovs-gglj6" podUID="001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8" Nov 22 07:34:23 crc kubenswrapper[4929]: E1122 07:34:23.376764 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 22 07:34:23 crc kubenswrapper[4929]: E1122 07:34:23.377611 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cq7w7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:34:23 crc kubenswrapper[4929]: E1122 07:34:23.378875 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2" Nov 22 07:34:23 crc kubenswrapper[4929]: E1122 07:34:23.508199 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2" Nov 22 07:34:25 crc kubenswrapper[4929]: I1122 07:34:25.949298 4929 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 07:34:29 crc kubenswrapper[4929]: E1122 07:34:29.318039 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62" Nov 22 07:34:29 crc kubenswrapper[4929]: E1122 07:34:29.318594 4929 kuberuntime_manager.go:1274] "Unhandled Error" 
err="init container &Container{Name:init-config-reloader,Image:registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62,Command:[/bin/prometheus-config-reloader],Args:[--watch-interval=0 --listen-address=:8081 --config-file=/etc/prometheus/config/prometheus.yaml.gz --config-envsubst-file=/etc/prometheus/config_out/prometheus.env.yaml --watched-dir=/etc/prometheus/rules/prometheus-metric-storage-rulefiles-0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:reloader-init,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:SHARD,Value:0,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:false,MountPath:/etc/prometheus/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-out,ReadOnly:false,MountPath:/etc/prometheus/config_out,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:prometheus-metric-storage-rulefiles-0,ReadOnly:false,MountPath:/etc/prometheus/rules/prometheus-metric-storage-rulefiles-0,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f8w4x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod prometheus-metric-storage-0_openstack(fad488a3-81d2-42c9-9140-9ca981927e13): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 07:34:29 crc kubenswrapper[4929]: E1122 07:34:29.319862 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init-config-reloader\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/prometheus-metric-storage-0" podUID="fad488a3-81d2-42c9-9140-9ca981927e13" Nov 22 07:34:29 crc kubenswrapper[4929]: E1122 07:34:29.546973 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init-config-reloader\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62\\\"\"" 
pod="openstack/prometheus-metric-storage-0" podUID="fad488a3-81d2-42c9-9140-9ca981927e13" Nov 22 07:34:35 crc kubenswrapper[4929]: E1122 07:34:35.096070 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 22 07:34:35 crc kubenswrapper[4929]: E1122 07:34:35.097385 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-txjkf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(76073571-729d-4de5-bda6-780d28ae6a9b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:34:35 crc kubenswrapper[4929]: E1122 07:34:35.098632 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack/rabbitmq-server-0" podUID="76073571-729d-4de5-bda6-780d28ae6a9b" Nov 22 07:34:35 crc kubenswrapper[4929]: E1122 07:34:35.600611 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="76073571-729d-4de5-bda6-780d28ae6a9b" Nov 22 07:34:46 crc kubenswrapper[4929]: E1122 07:34:46.916801 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified" Nov 22 07:34:46 crc kubenswrapper[4929]: E1122 07:34:46.917865 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovsdbserver-nb,Image:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,Command:[/usr/bin/dumb-init],Args:[/usr/local/bin/container-scripts/setup.sh],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n598h669h695h5b4hb4h659h5bchb5h88hcfh58bhfdh59dh5dfh9h697hbbh5d8hd6h5d4h588h55ch6h5d5hddh5fhd7h678h67fh5dbh78h5d7q,ValueFrom:nil,},EnvVar{Name:OVN_LOGDIR,Value:/tmp,ValueFrom:nil,},EnvVar{Name:OVN_RUNDIR,Value:/tmp,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovndbcluster-nb-etc-ovn,ReadOnly:false,MountPath:/etc/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7rzk7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof 
ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/cleanup.sh],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:20,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovsdbserver-nb-0_openstack(2886f6ad-e9ae-48fa-b9eb-9688a1022f29): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:34:48 crc kubenswrapper[4929]: I1122 07:34:48.594712 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:34:48 crc kubenswrapper[4929]: I1122 07:34:48.594783 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:34:52 crc kubenswrapper[4929]: E1122 07:34:52.849839 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified" Nov 22 07:34:52 crc kubenswrapper[4929]: E1122 07:34:52.850765 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- 
/usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:nb4h6fh675h7h69h6h8hc6h5c5h645h5f5h565h579h5c5h4h6h584h564h55bh678h9bh6ch89h88hdbh598hc8h99h58dh5cdh8hb8q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kkfhg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(619c7624-23e8-4578-8361-b9c91a56e5c9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:34:52 crc kubenswrapper[4929]: E1122 07:34:52.853789 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="619c7624-23e8-4578-8361-b9c91a56e5c9" Nov 22 07:34:53 crc kubenswrapper[4929]: E1122 07:34:53.749269 4929 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="openstack/memcached-0" podUID="619c7624-23e8-4578-8361-b9c91a56e5c9" Nov 22 07:34:59 crc kubenswrapper[4929]: E1122 07:34:59.603524 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 22 07:34:59 crc kubenswrapper[4929]: E1122 07:34:59.604185 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-n82dd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(6a23e882-732c-416e-bfc5-c91517389f64): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:34:59 crc kubenswrapper[4929]: E1122 07:34:59.605458 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="6a23e882-732c-416e-bfc5-c91517389f64" Nov 22 07:34:59 crc kubenswrapper[4929]: E1122 07:34:59.795851 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="6a23e882-732c-416e-bfc5-c91517389f64" Nov 22 07:35:09 crc kubenswrapper[4929]: E1122 07:35:09.961390 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified" Nov 22 07:35:09 crc kubenswrapper[4929]: E1122 07:35:09.961909 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovn-controller,Image:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,Command:[ovn-controller --pidfile unix:/run/openvswitch/db.sock --certificate=/etc/pki/tls/certs/ovndb.crt --private-key=/etc/pki/tls/private/ovndb.key --ca-cert=/etc/pki/tls/certs/ovndbca.crt],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n584h5c5h54dh66hdbh5b6hbbh697h667hc6h95h78h5f4h565hf7h56dh57dh586h67ch54dhd9hb4h679hdch558h59ch67hbh77h8fh59fh65bq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run-ovn,ReadOnly:false,MountPath:/var/run/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log-ovn,ReadOnly:false,MountPath:/var/log/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5fgcw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_liveness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_readiness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/share/ovn/scripts/ovn-ctl 
stop_controller],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-6p98g_openstack(a17264d7-e93b-4448-9ed8-0be507a9120f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:35:09 crc kubenswrapper[4929]: E1122 07:35:09.963025 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-controller\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" Nov 22 07:35:10 crc kubenswrapper[4929]: E1122 07:35:10.896411 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-controller\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified\\\"\"" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" Nov 22 07:35:18 crc kubenswrapper[4929]: I1122 07:35:18.594437 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:35:18 crc kubenswrapper[4929]: I1122 07:35:18.595181 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:35:22 crc kubenswrapper[4929]: I1122 07:35:22.938850 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zv95t"] Nov 22 07:35:22 crc kubenswrapper[4929]: E1122 07:35:22.939838 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b99c6a04-d311-41c0-9b1e-675bcbb75aeb" containerName="init" Nov 22 07:35:22 crc kubenswrapper[4929]: I1122 07:35:22.939854 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="b99c6a04-d311-41c0-9b1e-675bcbb75aeb" containerName="init" Nov 22 07:35:22 crc kubenswrapper[4929]: E1122 07:35:22.939868 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b99c6a04-d311-41c0-9b1e-675bcbb75aeb" containerName="dnsmasq-dns" Nov 22 07:35:22 crc kubenswrapper[4929]: I1122 07:35:22.939875 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="b99c6a04-d311-41c0-9b1e-675bcbb75aeb" containerName="dnsmasq-dns" Nov 22 07:35:22 crc kubenswrapper[4929]: E1122 07:35:22.939903 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e420a12-2fa0-41d7-97f0-1045b245bcea" containerName="init" Nov 22 07:35:22 crc kubenswrapper[4929]: I1122 07:35:22.939911 4929 
state_mem.go:107] "Deleted CPUSet assignment" podUID="8e420a12-2fa0-41d7-97f0-1045b245bcea" containerName="init" Nov 22 07:35:22 crc kubenswrapper[4929]: I1122 07:35:22.940105 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e420a12-2fa0-41d7-97f0-1045b245bcea" containerName="init" Nov 22 07:35:22 crc kubenswrapper[4929]: I1122 07:35:22.940120 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="b99c6a04-d311-41c0-9b1e-675bcbb75aeb" containerName="dnsmasq-dns" Nov 22 07:35:22 crc kubenswrapper[4929]: I1122 07:35:22.941623 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zv95t" Nov 22 07:35:22 crc kubenswrapper[4929]: I1122 07:35:22.951780 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zv95t"] Nov 22 07:35:23 crc kubenswrapper[4929]: I1122 07:35:23.091088 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2584eaad-5c38-40d2-b1da-7a6268080fd0-utilities\") pod \"redhat-operators-zv95t\" (UID: \"2584eaad-5c38-40d2-b1da-7a6268080fd0\") " pod="openshift-marketplace/redhat-operators-zv95t" Nov 22 07:35:23 crc kubenswrapper[4929]: I1122 07:35:23.091517 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tm2nh\" (UniqueName: \"kubernetes.io/projected/2584eaad-5c38-40d2-b1da-7a6268080fd0-kube-api-access-tm2nh\") pod \"redhat-operators-zv95t\" (UID: \"2584eaad-5c38-40d2-b1da-7a6268080fd0\") " pod="openshift-marketplace/redhat-operators-zv95t" Nov 22 07:35:23 crc kubenswrapper[4929]: I1122 07:35:23.091573 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2584eaad-5c38-40d2-b1da-7a6268080fd0-catalog-content\") pod \"redhat-operators-zv95t\" (UID: \"2584eaad-5c38-40d2-b1da-7a6268080fd0\") " pod="openshift-marketplace/redhat-operators-zv95t" Nov 22 07:35:23 crc kubenswrapper[4929]: I1122 07:35:23.192986 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tm2nh\" (UniqueName: \"kubernetes.io/projected/2584eaad-5c38-40d2-b1da-7a6268080fd0-kube-api-access-tm2nh\") pod \"redhat-operators-zv95t\" (UID: \"2584eaad-5c38-40d2-b1da-7a6268080fd0\") " pod="openshift-marketplace/redhat-operators-zv95t" Nov 22 07:35:23 crc kubenswrapper[4929]: I1122 07:35:23.193044 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2584eaad-5c38-40d2-b1da-7a6268080fd0-catalog-content\") pod \"redhat-operators-zv95t\" (UID: \"2584eaad-5c38-40d2-b1da-7a6268080fd0\") " pod="openshift-marketplace/redhat-operators-zv95t" Nov 22 07:35:23 crc kubenswrapper[4929]: I1122 07:35:23.193105 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2584eaad-5c38-40d2-b1da-7a6268080fd0-utilities\") pod \"redhat-operators-zv95t\" (UID: \"2584eaad-5c38-40d2-b1da-7a6268080fd0\") " pod="openshift-marketplace/redhat-operators-zv95t" Nov 22 07:35:23 crc kubenswrapper[4929]: I1122 07:35:23.193641 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2584eaad-5c38-40d2-b1da-7a6268080fd0-utilities\") pod \"redhat-operators-zv95t\" (UID: 
\"2584eaad-5c38-40d2-b1da-7a6268080fd0\") " pod="openshift-marketplace/redhat-operators-zv95t" Nov 22 07:35:23 crc kubenswrapper[4929]: I1122 07:35:23.193679 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2584eaad-5c38-40d2-b1da-7a6268080fd0-catalog-content\") pod \"redhat-operators-zv95t\" (UID: \"2584eaad-5c38-40d2-b1da-7a6268080fd0\") " pod="openshift-marketplace/redhat-operators-zv95t" Nov 22 07:35:23 crc kubenswrapper[4929]: I1122 07:35:23.215113 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tm2nh\" (UniqueName: \"kubernetes.io/projected/2584eaad-5c38-40d2-b1da-7a6268080fd0-kube-api-access-tm2nh\") pod \"redhat-operators-zv95t\" (UID: \"2584eaad-5c38-40d2-b1da-7a6268080fd0\") " pod="openshift-marketplace/redhat-operators-zv95t" Nov 22 07:35:23 crc kubenswrapper[4929]: I1122 07:35:23.261393 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zv95t" Nov 22 07:35:40 crc kubenswrapper[4929]: E1122 07:35:40.776715 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified" Nov 22 07:35:40 crc kubenswrapper[4929]: E1122 07:35:40.776745 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Nov 22 07:35:40 crc kubenswrapper[4929]: E1122 07:35:40.778540 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash 
/var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cq7w7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:35:40 crc kubenswrapper[4929]: E1122 07:35:40.778795 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:ovsdb-server-init,Image:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,Command:[/usr/local/bin/container-scripts/init-ovsdb-server.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n584h5c5h54dh66hdbh5b6hbbh697h667hc6h95h78h5f4h565hf7h56dh57dh586h67ch54dhd9hb4h679hdch558h59ch67hbh77h8fh59fh65bq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-ovs,ReadOnly:false,MountPath:/etc/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log,ReadOnly:false,MountPath:/var/log/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-lib,ReadOnly:false,MountPath:/var/lib/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-c7p6p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-ovs-gglj6_openstack(001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:35:40 crc kubenswrapper[4929]: E1122 07:35:40.780105 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdb-server-init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-ovs-gglj6" podUID="001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8" Nov 22 07:35:40 crc kubenswrapper[4929]: E1122 07:35:40.780179 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2" Nov 22 07:35:47 crc kubenswrapper[4929]: E1122 07:35:47.475480 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 22 07:35:47 crc kubenswrapper[4929]: E1122 07:35:47.475503 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 22 07:35:47 crc kubenswrapper[4929]: E1122 07:35:47.476529 
4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7qzl8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(941ef061-1085-45ca-84e2-60447bb10c47): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:35:47 crc kubenswrapper[4929]: E1122 07:35:47.476603 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 
/var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-txjkf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(76073571-729d-4de5-bda6-780d28ae6a9b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:35:47 crc kubenswrapper[4929]: E1122 07:35:47.477741 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="941ef061-1085-45ca-84e2-60447bb10c47" Nov 22 07:35:47 crc kubenswrapper[4929]: E1122 07:35:47.477853 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="76073571-729d-4de5-bda6-780d28ae6a9b" Nov 22 07:35:47 crc kubenswrapper[4929]: I1122 07:35:47.486965 4929 scope.go:117] "RemoveContainer" containerID="76ebaae925f0b81d7b00cd5a05c44811300cc800023df70cd275c5ce00d5b8c2" Nov 22 07:35:48 crc kubenswrapper[4929]: I1122 07:35:48.594857 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:35:48 crc kubenswrapper[4929]: I1122 
07:35:48.594928 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:35:48 crc kubenswrapper[4929]: I1122 07:35:48.594980 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 07:35:48 crc kubenswrapper[4929]: I1122 07:35:48.596189 4929 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5a8cd91f718685551c87c3db0c1786abf5a0657ef65dfd00afe7c250421c651c"} pod="openshift-machine-config-operator/machine-config-daemon-dssfx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 07:35:48 crc kubenswrapper[4929]: I1122 07:35:48.596363 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" containerID="cri-o://5a8cd91f718685551c87c3db0c1786abf5a0657ef65dfd00afe7c250421c651c" gracePeriod=600 Nov 22 07:35:51 crc kubenswrapper[4929]: E1122 07:35:51.950026 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdb-server-init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified\\\"\"" pod="openstack/ovn-controller-ovs-gglj6" podUID="001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8" Nov 22 07:35:51 crc kubenswrapper[4929]: E1122 07:35:51.951401 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2" Nov 22 07:35:52 crc kubenswrapper[4929]: I1122 07:35:52.893374 4929 generic.go:334] "Generic (PLEG): container finished" podID="470531cb-120c-48d9-80e1-adf074cf3055" containerID="5a8cd91f718685551c87c3db0c1786abf5a0657ef65dfd00afe7c250421c651c" exitCode=0 Nov 22 07:35:52 crc kubenswrapper[4929]: I1122 07:35:52.893435 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerDied","Data":"5a8cd91f718685551c87c3db0c1786abf5a0657ef65dfd00afe7c250421c651c"} Nov 22 07:35:55 crc kubenswrapper[4929]: E1122 07:35:55.507577 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified" Nov 22 07:35:55 crc kubenswrapper[4929]: E1122 07:35:55.508305 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:openstack-network-exporter,Image:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,Command:[/app/openstack-network-exporter],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:OPENSTACK_NETWORK_EXPORTER_YAML,Value:/etc/config/openstack-network-exporter.yaml,ValueFrom:nil,},EnvVar{Name:CONFIG_HASH,Value:n598h669h695h5b4hb4h659h5bchb5h88hcfh58bhfdh59dh5dfh9h697hbbh5d8hd6h5d4h588h55ch6h5d5hddh5fhd7h678h67fh5dbh78h5d7q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovnmetrics.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovnmetrics.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7rzk7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovsdbserver-nb-0_openstack(2886f6ad-e9ae-48fa-b9eb-9688a1022f29): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:35:55 crc kubenswrapper[4929]: E1122 07:35:55.510131 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"ovsdbserver-nb\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"openstack-network-exporter\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"]" pod="openstack/ovsdbserver-nb-0" podUID="2886f6ad-e9ae-48fa-b9eb-9688a1022f29" Nov 22 07:35:55 crc kubenswrapper[4929]: E1122 07:35:55.513026 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified" Nov 22 07:35:55 crc kubenswrapper[4929]: E1122 07:35:55.513266 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:openstack-network-exporter,Image:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,Command:[/app/openstack-network-exporter],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:OPENSTACK_NETWORK_EXPORTER_YAML,Value:/etc/config/openstack-network-exporter.yaml,ValueFrom:nil,},EnvVar{Name:CONFIG_HASH,Value:n56chbfh668h78h5ch644h545h585h59bh557h654h546h577h8dh6bh5c8h5b4h7fhdh684h66ch5f4h54bh645hbbh586h5d7h698h9fhcdh69h59q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:ovs-rundir,ReadOnly:true,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-rundir,ReadOnly:true,MountPath:/var/run/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovnmetrics.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovnmetrics.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7vh9h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-metrics-p8r4f_openstack(06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:35:55 crc kubenswrapper[4929]: E1122 07:35:55.514487 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-metrics-p8r4f" podUID="06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380" Nov 22 07:35:55 crc kubenswrapper[4929]: E1122 07:35:55.927074 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified\\\"\"" pod="openstack/ovn-controller-metrics-p8r4f" 
podUID="06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380" Nov 22 07:36:00 crc kubenswrapper[4929]: E1122 07:36:00.005050 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="941ef061-1085-45ca-84e2-60447bb10c47" Nov 22 07:36:00 crc kubenswrapper[4929]: E1122 07:36:00.005353 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="76073571-729d-4de5-bda6-780d28ae6a9b" Nov 22 07:36:00 crc kubenswrapper[4929]: I1122 07:36:00.697992 4929 scope.go:117] "RemoveContainer" containerID="a1361e4489b9f245e60757542f43a3174963376402a0adf9696f3bcc58d868f1" Nov 22 07:36:05 crc kubenswrapper[4929]: I1122 07:36:05.213316 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zv95t"] Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.056691 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc" event={"ID":"f4a5840d-58f8-4312-854c-b927be25fc3f","Type":"ContainerStarted","Data":"36222877e144232ecdd692b0b7de2d92d9e0f9677a1af113741cc346834fe9f3"} Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.058419 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc" Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.064746 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-4p6db" event={"ID":"43955a6b-f4ff-4222-8a61-2467e5c2b515","Type":"ContainerStarted","Data":"c39934090b6142e29ed4b44ce5fbe7b1f37ade4826e6ce215167fba3546d14c5"} Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.064880 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7f896c8c65-4p6db" Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.065995 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zv95t" event={"ID":"2584eaad-5c38-40d2-b1da-7a6268080fd0","Type":"ContainerStarted","Data":"fc35cb1a91f7a32493454e46b3231fa52877a1d60c65a1e2f71ecc1cfa53a385"} Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.082948 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc" podStartSLOduration=128.082923164 podStartE2EDuration="2m8.082923164s" podCreationTimestamp="2025-11-22 07:33:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:36:07.080643578 +0000 UTC m=+1504.190097611" watchObservedRunningTime="2025-11-22 07:36:07.082923164 +0000 UTC m=+1504.192377187" Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.103061 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7f896c8c65-4p6db" podStartSLOduration=129.103043292 podStartE2EDuration="2m9.103043292s" podCreationTimestamp="2025-11-22 07:33:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:36:07.100831429 +0000 UTC m=+1504.210285492" 
watchObservedRunningTime="2025-11-22 07:36:07.103043292 +0000 UTC m=+1504.212497305" Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.314810 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-cv5dp"] Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.319370 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cv5dp" Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.331282 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cv5dp"] Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.455467 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91c4ac46-cc90-4399-9ef0-45c4b010ff70-utilities\") pod \"redhat-marketplace-cv5dp\" (UID: \"91c4ac46-cc90-4399-9ef0-45c4b010ff70\") " pod="openshift-marketplace/redhat-marketplace-cv5dp" Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.456088 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8k8tz\" (UniqueName: \"kubernetes.io/projected/91c4ac46-cc90-4399-9ef0-45c4b010ff70-kube-api-access-8k8tz\") pod \"redhat-marketplace-cv5dp\" (UID: \"91c4ac46-cc90-4399-9ef0-45c4b010ff70\") " pod="openshift-marketplace/redhat-marketplace-cv5dp" Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.456133 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91c4ac46-cc90-4399-9ef0-45c4b010ff70-catalog-content\") pod \"redhat-marketplace-cv5dp\" (UID: \"91c4ac46-cc90-4399-9ef0-45c4b010ff70\") " pod="openshift-marketplace/redhat-marketplace-cv5dp" Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.557882 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91c4ac46-cc90-4399-9ef0-45c4b010ff70-utilities\") pod \"redhat-marketplace-cv5dp\" (UID: \"91c4ac46-cc90-4399-9ef0-45c4b010ff70\") " pod="openshift-marketplace/redhat-marketplace-cv5dp" Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.558011 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8k8tz\" (UniqueName: \"kubernetes.io/projected/91c4ac46-cc90-4399-9ef0-45c4b010ff70-kube-api-access-8k8tz\") pod \"redhat-marketplace-cv5dp\" (UID: \"91c4ac46-cc90-4399-9ef0-45c4b010ff70\") " pod="openshift-marketplace/redhat-marketplace-cv5dp" Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.558043 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91c4ac46-cc90-4399-9ef0-45c4b010ff70-catalog-content\") pod \"redhat-marketplace-cv5dp\" (UID: \"91c4ac46-cc90-4399-9ef0-45c4b010ff70\") " pod="openshift-marketplace/redhat-marketplace-cv5dp" Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.558443 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91c4ac46-cc90-4399-9ef0-45c4b010ff70-utilities\") pod \"redhat-marketplace-cv5dp\" (UID: \"91c4ac46-cc90-4399-9ef0-45c4b010ff70\") " pod="openshift-marketplace/redhat-marketplace-cv5dp" Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.558535 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91c4ac46-cc90-4399-9ef0-45c4b010ff70-catalog-content\") pod \"redhat-marketplace-cv5dp\" (UID: \"91c4ac46-cc90-4399-9ef0-45c4b010ff70\") " pod="openshift-marketplace/redhat-marketplace-cv5dp" Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.580037 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8k8tz\" (UniqueName: \"kubernetes.io/projected/91c4ac46-cc90-4399-9ef0-45c4b010ff70-kube-api-access-8k8tz\") pod \"redhat-marketplace-cv5dp\" (UID: \"91c4ac46-cc90-4399-9ef0-45c4b010ff70\") " pod="openshift-marketplace/redhat-marketplace-cv5dp" Nov 22 07:36:07 crc kubenswrapper[4929]: I1122 07:36:07.649415 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cv5dp" Nov 22 07:36:07 crc kubenswrapper[4929]: E1122 07:36:07.847309 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 22 07:36:07 crc kubenswrapper[4929]: E1122 07:36:07.847412 4929 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Nov 22 07:36:07 crc kubenswrapper[4929]: E1122 07:36:07.847676 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods --namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-s97vp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
kube-state-metrics-0_openstack(18c31ba0-cf68-45bf-87da-d04ab3bd8b21): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 07:36:07 crc kubenswrapper[4929]: E1122 07:36:07.848913 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/kube-state-metrics-0" podUID="18c31ba0-cf68-45bf-87da-d04ab3bd8b21" Nov 22 07:36:08 crc kubenswrapper[4929]: E1122 07:36:08.104730 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="18c31ba0-cf68-45bf-87da-d04ab3bd8b21" Nov 22 07:36:08 crc kubenswrapper[4929]: I1122 07:36:08.370632 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cv5dp"] Nov 22 07:36:08 crc kubenswrapper[4929]: W1122 07:36:08.376479 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91c4ac46_cc90_4399_9ef0_45c4b010ff70.slice/crio-31129277b064588896f59c55be51b0f42eefcb68adeb752b0f937dcdd95a0e04 WatchSource:0}: Error finding container 31129277b064588896f59c55be51b0f42eefcb68adeb752b0f937dcdd95a0e04: Status 404 returned error can't find the container with id 31129277b064588896f59c55be51b0f42eefcb68adeb752b0f937dcdd95a0e04 Nov 22 07:36:09 crc kubenswrapper[4929]: I1122 07:36:09.109376 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cv5dp" event={"ID":"91c4ac46-cc90-4399-9ef0-45c4b010ff70","Type":"ContainerStarted","Data":"31129277b064588896f59c55be51b0f42eefcb68adeb752b0f937dcdd95a0e04"} Nov 22 07:36:10 crc kubenswrapper[4929]: I1122 07:36:10.121570 4929 generic.go:334] "Generic (PLEG): container finished" podID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerID="640630986a26365009acc339264be8508a357b488804af0f1d66de9207159467" exitCode=0 Nov 22 07:36:10 crc kubenswrapper[4929]: I1122 07:36:10.121645 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zv95t" event={"ID":"2584eaad-5c38-40d2-b1da-7a6268080fd0","Type":"ContainerDied","Data":"640630986a26365009acc339264be8508a357b488804af0f1d66de9207159467"} Nov 22 07:36:10 crc kubenswrapper[4929]: I1122 07:36:10.128001 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"619c7624-23e8-4578-8361-b9c91a56e5c9","Type":"ContainerStarted","Data":"b8304d95f2ed9641622b1f4dee82c4f50bcb8b9734d5345492dd35bb48056b18"} Nov 22 07:36:10 crc kubenswrapper[4929]: I1122 07:36:10.128513 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 22 07:36:10 crc kubenswrapper[4929]: I1122 07:36:10.130754 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d","Type":"ContainerStarted","Data":"9f11bdd83c065e7fa4a513bfb287a7259463230d54a6478bc2c466d49f415057"} Nov 22 07:36:10 crc kubenswrapper[4929]: I1122 07:36:10.132708 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" 
event={"ID":"6a23e882-732c-416e-bfc5-c91517389f64","Type":"ContainerStarted","Data":"2dec4097dc30d0da48950393a3aa3f99672eb9c0ec0b0a68d50a3ec2c56826a9"} Nov 22 07:36:10 crc kubenswrapper[4929]: I1122 07:36:10.135186 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"2886f6ad-e9ae-48fa-b9eb-9688a1022f29","Type":"ContainerStarted","Data":"570e54abc8cd9453f84e3e6c9ae01eb93b085f0a04803b76d38b7e14f516350b"} Nov 22 07:36:10 crc kubenswrapper[4929]: I1122 07:36:10.136630 4929 generic.go:334] "Generic (PLEG): container finished" podID="91c4ac46-cc90-4399-9ef0-45c4b010ff70" containerID="40a0e5f6b9c084a4f6a9185a702487e79145b2ca8b7d9ae822ba278aec69a344" exitCode=0 Nov 22 07:36:10 crc kubenswrapper[4929]: I1122 07:36:10.136881 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cv5dp" event={"ID":"91c4ac46-cc90-4399-9ef0-45c4b010ff70","Type":"ContainerDied","Data":"40a0e5f6b9c084a4f6a9185a702487e79145b2ca8b7d9ae822ba278aec69a344"} Nov 22 07:36:10 crc kubenswrapper[4929]: I1122 07:36:10.138656 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2","Type":"ContainerStarted","Data":"e8a4d0b6cf2507f1a320beb816beab7d14540a8c44dccd2d23f2e1f1e5c69680"} Nov 22 07:36:10 crc kubenswrapper[4929]: I1122 07:36:10.143248 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1"} Nov 22 07:36:10 crc kubenswrapper[4929]: I1122 07:36:10.148455 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6p98g" event={"ID":"a17264d7-e93b-4448-9ed8-0be507a9120f","Type":"ContainerStarted","Data":"fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1"} Nov 22 07:36:10 crc kubenswrapper[4929]: I1122 07:36:10.148660 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-6p98g" Nov 22 07:36:10 crc kubenswrapper[4929]: I1122 07:36:10.206315 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=31.416069607 podStartE2EDuration="2m39.206288646s" podCreationTimestamp="2025-11-22 07:33:31 +0000 UTC" firstStartedPulling="2025-11-22 07:33:52.932873457 +0000 UTC m=+1370.042327470" lastFinishedPulling="2025-11-22 07:36:00.723092466 +0000 UTC m=+1497.832546509" observedRunningTime="2025-11-22 07:36:10.203327694 +0000 UTC m=+1507.312781737" watchObservedRunningTime="2025-11-22 07:36:10.206288646 +0000 UTC m=+1507.315742679" Nov 22 07:36:10 crc kubenswrapper[4929]: I1122 07:36:10.280708 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-6p98g" podStartSLOduration=23.544806219 podStartE2EDuration="2m34.280687733s" podCreationTimestamp="2025-11-22 07:33:36 +0000 UTC" firstStartedPulling="2025-11-22 07:33:55.527472435 +0000 UTC m=+1372.636926448" lastFinishedPulling="2025-11-22 07:36:06.263353949 +0000 UTC m=+1503.372807962" observedRunningTime="2025-11-22 07:36:10.279681498 +0000 UTC m=+1507.389135511" watchObservedRunningTime="2025-11-22 07:36:10.280687733 +0000 UTC m=+1507.390141746" Nov 22 07:36:14 crc kubenswrapper[4929]: I1122 07:36:14.181387 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" 
event={"ID":"fad488a3-81d2-42c9-9140-9ca981927e13","Type":"ContainerStarted","Data":"abdb302fcdf4f2912c2350f0344010bb5907017916e3de85bb61deb9d8084109"} Nov 22 07:36:14 crc kubenswrapper[4929]: I1122 07:36:14.194599 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7f896c8c65-4p6db" Nov 22 07:36:14 crc kubenswrapper[4929]: I1122 07:36:14.467417 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc" Nov 22 07:36:14 crc kubenswrapper[4929]: I1122 07:36:14.516606 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-4p6db"] Nov 22 07:36:15 crc kubenswrapper[4929]: I1122 07:36:15.187441 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7f896c8c65-4p6db" podUID="43955a6b-f4ff-4222-8a61-2467e5c2b515" containerName="dnsmasq-dns" containerID="cri-o://c39934090b6142e29ed4b44ce5fbe7b1f37ade4826e6ce215167fba3546d14c5" gracePeriod=10 Nov 22 07:36:16 crc kubenswrapper[4929]: I1122 07:36:16.202415 4929 generic.go:334] "Generic (PLEG): container finished" podID="43955a6b-f4ff-4222-8a61-2467e5c2b515" containerID="c39934090b6142e29ed4b44ce5fbe7b1f37ade4826e6ce215167fba3546d14c5" exitCode=0 Nov 22 07:36:16 crc kubenswrapper[4929]: I1122 07:36:16.202506 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-4p6db" event={"ID":"43955a6b-f4ff-4222-8a61-2467e5c2b515","Type":"ContainerDied","Data":"c39934090b6142e29ed4b44ce5fbe7b1f37ade4826e6ce215167fba3546d14c5"} Nov 22 07:36:16 crc kubenswrapper[4929]: I1122 07:36:16.562082 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-4p6db" Nov 22 07:36:16 crc kubenswrapper[4929]: I1122 07:36:16.617532 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 22 07:36:16 crc kubenswrapper[4929]: I1122 07:36:16.621153 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43955a6b-f4ff-4222-8a61-2467e5c2b515-dns-svc\") pod \"43955a6b-f4ff-4222-8a61-2467e5c2b515\" (UID: \"43955a6b-f4ff-4222-8a61-2467e5c2b515\") " Nov 22 07:36:16 crc kubenswrapper[4929]: I1122 07:36:16.621284 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5h8g9\" (UniqueName: \"kubernetes.io/projected/43955a6b-f4ff-4222-8a61-2467e5c2b515-kube-api-access-5h8g9\") pod \"43955a6b-f4ff-4222-8a61-2467e5c2b515\" (UID: \"43955a6b-f4ff-4222-8a61-2467e5c2b515\") " Nov 22 07:36:16 crc kubenswrapper[4929]: I1122 07:36:16.621453 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43955a6b-f4ff-4222-8a61-2467e5c2b515-ovsdbserver-sb\") pod \"43955a6b-f4ff-4222-8a61-2467e5c2b515\" (UID: \"43955a6b-f4ff-4222-8a61-2467e5c2b515\") " Nov 22 07:36:16 crc kubenswrapper[4929]: I1122 07:36:16.621506 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43955a6b-f4ff-4222-8a61-2467e5c2b515-config\") pod \"43955a6b-f4ff-4222-8a61-2467e5c2b515\" (UID: \"43955a6b-f4ff-4222-8a61-2467e5c2b515\") " Nov 22 07:36:16 crc kubenswrapper[4929]: I1122 07:36:16.629052 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/43955a6b-f4ff-4222-8a61-2467e5c2b515-kube-api-access-5h8g9" (OuterVolumeSpecName: "kube-api-access-5h8g9") pod "43955a6b-f4ff-4222-8a61-2467e5c2b515" (UID: "43955a6b-f4ff-4222-8a61-2467e5c2b515"). InnerVolumeSpecName "kube-api-access-5h8g9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:36:16 crc kubenswrapper[4929]: I1122 07:36:16.670960 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43955a6b-f4ff-4222-8a61-2467e5c2b515-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "43955a6b-f4ff-4222-8a61-2467e5c2b515" (UID: "43955a6b-f4ff-4222-8a61-2467e5c2b515"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:36:16 crc kubenswrapper[4929]: I1122 07:36:16.684202 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43955a6b-f4ff-4222-8a61-2467e5c2b515-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "43955a6b-f4ff-4222-8a61-2467e5c2b515" (UID: "43955a6b-f4ff-4222-8a61-2467e5c2b515"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:36:16 crc kubenswrapper[4929]: I1122 07:36:16.688830 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43955a6b-f4ff-4222-8a61-2467e5c2b515-config" (OuterVolumeSpecName: "config") pod "43955a6b-f4ff-4222-8a61-2467e5c2b515" (UID: "43955a6b-f4ff-4222-8a61-2467e5c2b515"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:36:16 crc kubenswrapper[4929]: I1122 07:36:16.723906 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/43955a6b-f4ff-4222-8a61-2467e5c2b515-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 07:36:16 crc kubenswrapper[4929]: I1122 07:36:16.723951 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/43955a6b-f4ff-4222-8a61-2467e5c2b515-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:36:16 crc kubenswrapper[4929]: I1122 07:36:16.723963 4929 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/43955a6b-f4ff-4222-8a61-2467e5c2b515-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 07:36:16 crc kubenswrapper[4929]: I1122 07:36:16.723979 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5h8g9\" (UniqueName: \"kubernetes.io/projected/43955a6b-f4ff-4222-8a61-2467e5c2b515-kube-api-access-5h8g9\") on node \"crc\" DevicePath \"\"" Nov 22 07:36:17 crc kubenswrapper[4929]: I1122 07:36:17.212410 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-4p6db" event={"ID":"43955a6b-f4ff-4222-8a61-2467e5c2b515","Type":"ContainerDied","Data":"9b972d4840dba9f5fbc5b80230ecf3c0e1dd477a74996c80faf4a913ebabd3d9"} Nov 22 07:36:17 crc kubenswrapper[4929]: I1122 07:36:17.212486 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-4p6db" Nov 22 07:36:17 crc kubenswrapper[4929]: I1122 07:36:17.212755 4929 scope.go:117] "RemoveContainer" containerID="c39934090b6142e29ed4b44ce5fbe7b1f37ade4826e6ce215167fba3546d14c5" Nov 22 07:36:17 crc kubenswrapper[4929]: I1122 07:36:17.244743 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-4p6db"] Nov 22 07:36:17 crc kubenswrapper[4929]: I1122 07:36:17.253477 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-4p6db"] Nov 22 07:36:17 crc kubenswrapper[4929]: I1122 07:36:17.965659 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43955a6b-f4ff-4222-8a61-2467e5c2b515" path="/var/lib/kubelet/pods/43955a6b-f4ff-4222-8a61-2467e5c2b515/volumes" Nov 22 07:36:19 crc kubenswrapper[4929]: I1122 07:36:19.186941 4929 scope.go:117] "RemoveContainer" containerID="58b9a0d44a2ca920537a18ec9a7760e15c98c34a227500bffbdfa211e24e1efb" Nov 22 07:36:21 crc kubenswrapper[4929]: I1122 07:36:21.255646 4929 generic.go:334] "Generic (PLEG): container finished" podID="fad488a3-81d2-42c9-9140-9ca981927e13" containerID="abdb302fcdf4f2912c2350f0344010bb5907017916e3de85bb61deb9d8084109" exitCode=0 Nov 22 07:36:21 crc kubenswrapper[4929]: I1122 07:36:21.255716 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fad488a3-81d2-42c9-9140-9ca981927e13","Type":"ContainerDied","Data":"abdb302fcdf4f2912c2350f0344010bb5907017916e3de85bb61deb9d8084109"} Nov 22 07:36:22 crc kubenswrapper[4929]: I1122 07:36:22.264996 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d","Type":"ContainerStarted","Data":"c769220ffef9d94bc18d22df9aa177ee1830b783cd295be1a34ea507ad2fffaa"} Nov 22 07:36:22 crc kubenswrapper[4929]: I1122 07:36:22.267915 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zv95t" event={"ID":"2584eaad-5c38-40d2-b1da-7a6268080fd0","Type":"ContainerStarted","Data":"00257251583bf2c203ce56a26642666f911b29b65b17f6b2882b0bdcf6eb88f9"} Nov 22 07:36:22 crc kubenswrapper[4929]: I1122 07:36:22.270197 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-p8r4f" event={"ID":"06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380","Type":"ContainerStarted","Data":"16fb1e15a6cb675aa7b4a060215fa64a427aedf7754c33a471ae624189446a13"} Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.320855 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-zg7gb"] Nov 22 07:36:23 crc kubenswrapper[4929]: E1122 07:36:23.321305 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43955a6b-f4ff-4222-8a61-2467e5c2b515" containerName="dnsmasq-dns" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.321324 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="43955a6b-f4ff-4222-8a61-2467e5c2b515" containerName="dnsmasq-dns" Nov 22 07:36:23 crc kubenswrapper[4929]: E1122 07:36:23.321348 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43955a6b-f4ff-4222-8a61-2467e5c2b515" containerName="init" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.321359 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="43955a6b-f4ff-4222-8a61-2467e5c2b515" containerName="init" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.321561 4929 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="43955a6b-f4ff-4222-8a61-2467e5c2b515" containerName="dnsmasq-dns" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.322691 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.334524 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-zg7gb"] Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.449156 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-dns-svc\") pod \"dnsmasq-dns-698758b865-zg7gb\" (UID: \"3d1bd063-db18-4012-85a9-fc270e6d5782\") " pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.449244 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-zg7gb\" (UID: \"3d1bd063-db18-4012-85a9-fc270e6d5782\") " pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.449338 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8557\" (UniqueName: \"kubernetes.io/projected/3d1bd063-db18-4012-85a9-fc270e6d5782-kube-api-access-c8557\") pod \"dnsmasq-dns-698758b865-zg7gb\" (UID: \"3d1bd063-db18-4012-85a9-fc270e6d5782\") " pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.449405 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-config\") pod \"dnsmasq-dns-698758b865-zg7gb\" (UID: \"3d1bd063-db18-4012-85a9-fc270e6d5782\") " pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.449439 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-zg7gb\" (UID: \"3d1bd063-db18-4012-85a9-fc270e6d5782\") " pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.553024 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-dns-svc\") pod \"dnsmasq-dns-698758b865-zg7gb\" (UID: \"3d1bd063-db18-4012-85a9-fc270e6d5782\") " pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.553075 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-zg7gb\" (UID: \"3d1bd063-db18-4012-85a9-fc270e6d5782\") " pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.553129 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8557\" (UniqueName: \"kubernetes.io/projected/3d1bd063-db18-4012-85a9-fc270e6d5782-kube-api-access-c8557\") pod \"dnsmasq-dns-698758b865-zg7gb\" (UID: 
\"3d1bd063-db18-4012-85a9-fc270e6d5782\") " pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.553178 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-config\") pod \"dnsmasq-dns-698758b865-zg7gb\" (UID: \"3d1bd063-db18-4012-85a9-fc270e6d5782\") " pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.553227 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-zg7gb\" (UID: \"3d1bd063-db18-4012-85a9-fc270e6d5782\") " pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.554072 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-dns-svc\") pod \"dnsmasq-dns-698758b865-zg7gb\" (UID: \"3d1bd063-db18-4012-85a9-fc270e6d5782\") " pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.554099 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-zg7gb\" (UID: \"3d1bd063-db18-4012-85a9-fc270e6d5782\") " pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.554099 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-config\") pod \"dnsmasq-dns-698758b865-zg7gb\" (UID: \"3d1bd063-db18-4012-85a9-fc270e6d5782\") " pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.554236 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-zg7gb\" (UID: \"3d1bd063-db18-4012-85a9-fc270e6d5782\") " pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.574132 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8557\" (UniqueName: \"kubernetes.io/projected/3d1bd063-db18-4012-85a9-fc270e6d5782-kube-api-access-c8557\") pod \"dnsmasq-dns-698758b865-zg7gb\" (UID: \"3d1bd063-db18-4012-85a9-fc270e6d5782\") " pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:36:23 crc kubenswrapper[4929]: I1122 07:36:23.640347 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:36:24 crc kubenswrapper[4929]: W1122 07:36:24.110532 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d1bd063_db18_4012_85a9_fc270e6d5782.slice/crio-22ef5a01ff33c5e178b59af73abc9e2d451f41a04c86ead077aadfc127d5094c WatchSource:0}: Error finding container 22ef5a01ff33c5e178b59af73abc9e2d451f41a04c86ead077aadfc127d5094c: Status 404 returned error can't find the container with id 22ef5a01ff33c5e178b59af73abc9e2d451f41a04c86ead077aadfc127d5094c Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.120081 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-zg7gb"] Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.284722 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"2886f6ad-e9ae-48fa-b9eb-9688a1022f29","Type":"ContainerStarted","Data":"5f0bfa3f35cc772a8b08a6d4520c94b7108dfbb4c09a0182f329e3c9e646fd9b"} Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.285914 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-zg7gb" event={"ID":"3d1bd063-db18-4012-85a9-fc270e6d5782","Type":"ContainerStarted","Data":"22ef5a01ff33c5e178b59af73abc9e2d451f41a04c86ead077aadfc127d5094c"} Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.287075 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-gglj6" event={"ID":"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8","Type":"ContainerStarted","Data":"67b42da3e73bc437b52c7268e1d1e4786a81cdae33e6994803557661c047e05f"} Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.393552 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.400289 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.402190 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-4cxzl" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.402358 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.402434 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.402647 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.414568 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.470995 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wcmc\" (UniqueName: \"kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-kube-api-access-9wcmc\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.471037 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/dab37299-3b8e-46d0-b6a5-044f7d4878d6-cache\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.471068 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.471090 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/dab37299-3b8e-46d0-b6a5-044f7d4878d6-lock\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.471136 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.572855 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.572914 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/dab37299-3b8e-46d0-b6a5-044f7d4878d6-lock\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:36:24 crc kubenswrapper[4929]: E1122 07:36:24.573025 4929 projected.go:288] Couldn't get configMap 
openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 22 07:36:24 crc kubenswrapper[4929]: E1122 07:36:24.573048 4929 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 22 07:36:24 crc kubenswrapper[4929]: E1122 07:36:24.573102 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift podName:dab37299-3b8e-46d0-b6a5-044f7d4878d6 nodeName:}" failed. No retries permitted until 2025-11-22 07:36:25.073081801 +0000 UTC m=+1522.182535824 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift") pod "swift-storage-0" (UID: "dab37299-3b8e-46d0-b6a5-044f7d4878d6") : configmap "swift-ring-files" not found Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.573027 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.573394 4929 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/swift-storage-0" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.573452 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/dab37299-3b8e-46d0-b6a5-044f7d4878d6-lock\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.573694 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9wcmc\" (UniqueName: \"kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-kube-api-access-9wcmc\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.573728 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/dab37299-3b8e-46d0-b6a5-044f7d4878d6-cache\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.574046 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/dab37299-3b8e-46d0-b6a5-044f7d4878d6-cache\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.594714 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.600306 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wcmc\" (UniqueName: 
\"kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-kube-api-access-9wcmc\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.871719 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-bbxj7"] Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.872684 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.876086 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.876397 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.877944 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.890285 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-bbxj7"] Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.980190 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/4c640fe8-4583-4162-949d-4508edaca274-etc-swift\") pod \"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.980329 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/4c640fe8-4583-4162-949d-4508edaca274-swiftconf\") pod \"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.980363 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9s5rh\" (UniqueName: \"kubernetes.io/projected/4c640fe8-4583-4162-949d-4508edaca274-kube-api-access-9s5rh\") pod \"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.980415 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c640fe8-4583-4162-949d-4508edaca274-combined-ca-bundle\") pod \"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.980531 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/4c640fe8-4583-4162-949d-4508edaca274-dispersionconf\") pod \"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.980578 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/4c640fe8-4583-4162-949d-4508edaca274-ring-data-devices\") pod \"swift-ring-rebalance-bbxj7\" (UID: 
\"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:24 crc kubenswrapper[4929]: I1122 07:36:24.980611 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c640fe8-4583-4162-949d-4508edaca274-scripts\") pod \"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.081892 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/4c640fe8-4583-4162-949d-4508edaca274-etc-swift\") pod \"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.082563 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/4c640fe8-4583-4162-949d-4508edaca274-etc-swift\") pod \"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.082572 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/4c640fe8-4583-4162-949d-4508edaca274-swiftconf\") pod \"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.082668 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.082753 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9s5rh\" (UniqueName: \"kubernetes.io/projected/4c640fe8-4583-4162-949d-4508edaca274-kube-api-access-9s5rh\") pod \"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:25 crc kubenswrapper[4929]: E1122 07:36:25.082813 4929 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 22 07:36:25 crc kubenswrapper[4929]: E1122 07:36:25.082833 4929 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 22 07:36:25 crc kubenswrapper[4929]: E1122 07:36:25.082879 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift podName:dab37299-3b8e-46d0-b6a5-044f7d4878d6 nodeName:}" failed. No retries permitted until 2025-11-22 07:36:26.082860481 +0000 UTC m=+1523.192314594 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift") pod "swift-storage-0" (UID: "dab37299-3b8e-46d0-b6a5-044f7d4878d6") : configmap "swift-ring-files" not found Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.082955 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c640fe8-4583-4162-949d-4508edaca274-combined-ca-bundle\") pod \"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.083055 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/4c640fe8-4583-4162-949d-4508edaca274-dispersionconf\") pod \"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.083126 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/4c640fe8-4583-4162-949d-4508edaca274-ring-data-devices\") pod \"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.083272 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c640fe8-4583-4162-949d-4508edaca274-scripts\") pod \"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.084071 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/4c640fe8-4583-4162-949d-4508edaca274-ring-data-devices\") pod \"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.084938 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c640fe8-4583-4162-949d-4508edaca274-scripts\") pod \"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.100308 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/4c640fe8-4583-4162-949d-4508edaca274-dispersionconf\") pod \"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.111543 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c640fe8-4583-4162-949d-4508edaca274-combined-ca-bundle\") pod \"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.112844 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/4c640fe8-4583-4162-949d-4508edaca274-swiftconf\") pod 
\"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.116374 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9s5rh\" (UniqueName: \"kubernetes.io/projected/4c640fe8-4583-4162-949d-4508edaca274-kube-api-access-9s5rh\") pod \"swift-ring-rebalance-bbxj7\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") " pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.197827 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.307496 4929 generic.go:334] "Generic (PLEG): container finished" podID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerID="00257251583bf2c203ce56a26642666f911b29b65b17f6b2882b0bdcf6eb88f9" exitCode=0 Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.307554 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zv95t" event={"ID":"2584eaad-5c38-40d2-b1da-7a6268080fd0","Type":"ContainerDied","Data":"00257251583bf2c203ce56a26642666f911b29b65b17f6b2882b0bdcf6eb88f9"} Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.315399 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cv5dp" event={"ID":"91c4ac46-cc90-4399-9ef0-45c4b010ff70","Type":"ContainerStarted","Data":"9e1522b539920298a3c257a33e4a738be5a9df227ec123c00b421f1f1d1542a0"} Nov 22 07:36:25 crc kubenswrapper[4929]: I1122 07:36:25.625803 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-bbxj7"] Nov 22 07:36:25 crc kubenswrapper[4929]: W1122 07:36:25.627952 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c640fe8_4583_4162_949d_4508edaca274.slice/crio-6ed9f0e7e535cb69c4343758331555fa33c3390b10ef0ad33f51647052d52628 WatchSource:0}: Error finding container 6ed9f0e7e535cb69c4343758331555fa33c3390b10ef0ad33f51647052d52628: Status 404 returned error can't find the container with id 6ed9f0e7e535cb69c4343758331555fa33c3390b10ef0ad33f51647052d52628 Nov 22 07:36:26 crc kubenswrapper[4929]: I1122 07:36:26.105959 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:36:26 crc kubenswrapper[4929]: E1122 07:36:26.106163 4929 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 22 07:36:26 crc kubenswrapper[4929]: E1122 07:36:26.106200 4929 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 22 07:36:26 crc kubenswrapper[4929]: E1122 07:36:26.106302 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift podName:dab37299-3b8e-46d0-b6a5-044f7d4878d6 nodeName:}" failed. No retries permitted until 2025-11-22 07:36:28.106282999 +0000 UTC m=+1525.215737012 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift") pod "swift-storage-0" (UID: "dab37299-3b8e-46d0-b6a5-044f7d4878d6") : configmap "swift-ring-files" not found Nov 22 07:36:26 crc kubenswrapper[4929]: I1122 07:36:26.324219 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"76073571-729d-4de5-bda6-780d28ae6a9b","Type":"ContainerStarted","Data":"26f4f1a367c34712fefb9c0779fd5560b01e11b0e5ba8c53305f067f7653aeb3"} Nov 22 07:36:26 crc kubenswrapper[4929]: I1122 07:36:26.325916 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-zg7gb" event={"ID":"3d1bd063-db18-4012-85a9-fc270e6d5782","Type":"ContainerStarted","Data":"d18510dfc3a98f4a1de161d507cdd6bf21b0f4714b14716131c4c0882b470d35"} Nov 22 07:36:26 crc kubenswrapper[4929]: I1122 07:36:26.327544 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"941ef061-1085-45ca-84e2-60447bb10c47","Type":"ContainerStarted","Data":"538735a5e705cdc86d5bc3adf7665f0bdb3fcc025481cc42b1437f9732199404"} Nov 22 07:36:26 crc kubenswrapper[4929]: I1122 07:36:26.328554 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-bbxj7" event={"ID":"4c640fe8-4583-4162-949d-4508edaca274","Type":"ContainerStarted","Data":"6ed9f0e7e535cb69c4343758331555fa33c3390b10ef0ad33f51647052d52628"} Nov 22 07:36:26 crc kubenswrapper[4929]: I1122 07:36:26.376800 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=26.418638688 podStartE2EDuration="2m49.376778259s" podCreationTimestamp="2025-11-22 07:33:37 +0000 UTC" firstStartedPulling="2025-11-22 07:33:55.834386529 +0000 UTC m=+1372.943840542" lastFinishedPulling="2025-11-22 07:36:18.7925261 +0000 UTC m=+1515.901980113" observedRunningTime="2025-11-22 07:36:26.356602279 +0000 UTC m=+1523.466056292" watchObservedRunningTime="2025-11-22 07:36:26.376778259 +0000 UTC m=+1523.486232272" Nov 22 07:36:27 crc kubenswrapper[4929]: I1122 07:36:27.241707 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 22 07:36:27 crc kubenswrapper[4929]: I1122 07:36:27.296816 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 22 07:36:27 crc kubenswrapper[4929]: I1122 07:36:27.361963 4929 generic.go:334] "Generic (PLEG): container finished" podID="91c4ac46-cc90-4399-9ef0-45c4b010ff70" containerID="9e1522b539920298a3c257a33e4a738be5a9df227ec123c00b421f1f1d1542a0" exitCode=0 Nov 22 07:36:27 crc kubenswrapper[4929]: I1122 07:36:27.363652 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cv5dp" event={"ID":"91c4ac46-cc90-4399-9ef0-45c4b010ff70","Type":"ContainerDied","Data":"9e1522b539920298a3c257a33e4a738be5a9df227ec123c00b421f1f1d1542a0"} Nov 22 07:36:27 crc kubenswrapper[4929]: I1122 07:36:27.363689 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 22 07:36:27 crc kubenswrapper[4929]: I1122 07:36:27.438476 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-p8r4f" podStartSLOduration=8.224900086 podStartE2EDuration="2m29.438450895s" podCreationTimestamp="2025-11-22 07:33:58 +0000 UTC" firstStartedPulling="2025-11-22 07:33:59.643782072 +0000 
Nov 22 07:36:27 crc kubenswrapper[4929]: I1122 07:36:27.479273 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=23.297507762 podStartE2EDuration="2m48.479254856s" podCreationTimestamp="2025-11-22 07:33:39 +0000 UTC" firstStartedPulling="2025-11-22 07:33:55.67569306 +0000 UTC m=+1372.785147073" lastFinishedPulling="2025-11-22 07:36:20.857440114 +0000 UTC m=+1517.966894167" observedRunningTime="2025-11-22 07:36:27.476589152 +0000 UTC m=+1524.586043175" watchObservedRunningTime="2025-11-22 07:36:27.479254856 +0000 UTC m=+1524.588708879"
Nov 22 07:36:27 crc kubenswrapper[4929]: I1122 07:36:27.515825 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0"
Nov 22 07:36:28 crc kubenswrapper[4929]: I1122 07:36:28.170415 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0"
Nov 22 07:36:28 crc kubenswrapper[4929]: E1122 07:36:28.171000 4929 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 22 07:36:28 crc kubenswrapper[4929]: E1122 07:36:28.171042 4929 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 22 07:36:28 crc kubenswrapper[4929]: E1122 07:36:28.171168 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift podName:dab37299-3b8e-46d0-b6a5-044f7d4878d6 nodeName:}" failed. No retries permitted until 2025-11-22 07:36:32.171138581 +0000 UTC m=+1529.280592614 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift") pod "swift-storage-0" (UID: "dab37299-3b8e-46d0-b6a5-044f7d4878d6") : configmap "swift-ring-files" not found
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.045807 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.081734 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.375445 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.416716 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.607951 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"]
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.611941 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.622613 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.622767 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.622803 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.623095 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-4hdc8"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.628854 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.697134 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2d73bc22-a412-491f-9484-71864027c02f-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.697290 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zfk2\" (UniqueName: \"kubernetes.io/projected/2d73bc22-a412-491f-9484-71864027c02f-kube-api-access-4zfk2\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.697335 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d73bc22-a412-491f-9484-71864027c02f-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.697450 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2d73bc22-a412-491f-9484-71864027c02f-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.697637 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d73bc22-a412-491f-9484-71864027c02f-config\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.697703 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2d73bc22-a412-491f-9484-71864027c02f-scripts\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.697763 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/2d73bc22-a412-491f-9484-71864027c02f-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.799374 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2d73bc22-a412-491f-9484-71864027c02f-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.799766 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d73bc22-a412-491f-9484-71864027c02f-config\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.799808 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2d73bc22-a412-491f-9484-71864027c02f-scripts\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.799836 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/2d73bc22-a412-491f-9484-71864027c02f-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.799869 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2d73bc22-a412-491f-9484-71864027c02f-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.799892 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zfk2\" (UniqueName: \"kubernetes.io/projected/2d73bc22-a412-491f-9484-71864027c02f-kube-api-access-4zfk2\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.799907 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d73bc22-a412-491f-9484-71864027c02f-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.800525 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2d73bc22-a412-491f-9484-71864027c02f-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.801419 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d73bc22-a412-491f-9484-71864027c02f-config\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.801430 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2d73bc22-a412-491f-9484-71864027c02f-scripts\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:29 crc kubenswrapper[4929]: I1122 07:36:29.822738 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zfk2\" (UniqueName: \"kubernetes.io/projected/2d73bc22-a412-491f-9484-71864027c02f-kube-api-access-4zfk2\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:30 crc kubenswrapper[4929]: I1122 07:36:30.484453 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/2d73bc22-a412-491f-9484-71864027c02f-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:30 crc kubenswrapper[4929]: I1122 07:36:30.484726 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2d73bc22-a412-491f-9484-71864027c02f-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:30 crc kubenswrapper[4929]: I1122 07:36:30.491032 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d73bc22-a412-491f-9484-71864027c02f-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"2d73bc22-a412-491f-9484-71864027c02f\") " pod="openstack/ovn-northd-0"
Nov 22 07:36:30 crc kubenswrapper[4929]: I1122 07:36:30.540768 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 22 07:36:31 crc kubenswrapper[4929]: I1122 07:36:31.391321 4929 generic.go:334] "Generic (PLEG): container finished" podID="3d1bd063-db18-4012-85a9-fc270e6d5782" containerID="d18510dfc3a98f4a1de161d507cdd6bf21b0f4714b14716131c4c0882b470d35" exitCode=0
Nov 22 07:36:31 crc kubenswrapper[4929]: I1122 07:36:31.392865 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-zg7gb" event={"ID":"3d1bd063-db18-4012-85a9-fc270e6d5782","Type":"ContainerDied","Data":"d18510dfc3a98f4a1de161d507cdd6bf21b0f4714b14716131c4c0882b470d35"}
Nov 22 07:36:32 crc kubenswrapper[4929]: I1122 07:36:32.251608 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0"
Nov 22 07:36:32 crc kubenswrapper[4929]: E1122 07:36:32.251832 4929 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 22 07:36:32 crc kubenswrapper[4929]: E1122 07:36:32.252011 4929 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 22 07:36:32 crc kubenswrapper[4929]: E1122 07:36:32.252066 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift podName:dab37299-3b8e-46d0-b6a5-044f7d4878d6 nodeName:}" failed. No retries permitted until 2025-11-22 07:36:40.252051209 +0000 UTC m=+1537.361505222 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift") pod "swift-storage-0" (UID: "dab37299-3b8e-46d0-b6a5-044f7d4878d6") : configmap "swift-ring-files" not found
Nov 22 07:36:38 crc kubenswrapper[4929]: I1122 07:36:38.450460 4929 generic.go:334] "Generic (PLEG): container finished" podID="001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8" containerID="67b42da3e73bc437b52c7268e1d1e4786a81cdae33e6994803557661c047e05f" exitCode=0
Nov 22 07:36:38 crc kubenswrapper[4929]: I1122 07:36:38.450622 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-gglj6" event={"ID":"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8","Type":"ContainerDied","Data":"67b42da3e73bc437b52c7268e1d1e4786a81cdae33e6994803557661c047e05f"}
Nov 22 07:36:40 crc kubenswrapper[4929]: I1122 07:36:40.302563 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0"
Nov 22 07:36:40 crc kubenswrapper[4929]: E1122 07:36:40.302962 4929 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 22 07:36:40 crc kubenswrapper[4929]: E1122 07:36:40.303407 4929 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 22 07:36:40 crc kubenswrapper[4929]: E1122 07:36:40.303529 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift podName:dab37299-3b8e-46d0-b6a5-044f7d4878d6 nodeName:}" failed. No retries permitted until 2025-11-22 07:36:56.303469653 +0000 UTC m=+1553.412923706 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift") pod "swift-storage-0" (UID: "dab37299-3b8e-46d0-b6a5-044f7d4878d6") : configmap "swift-ring-files" not found
Nov 22 07:36:44 crc kubenswrapper[4929]: E1122 07:36:44.320356 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/prometheus-rhel9@sha256:17ea20be390a94ab39f5cdd7f0cbc2498046eebcf77fe3dec9aa288d5c2cf46b"
Nov 22 07:36:44 crc kubenswrapper[4929]: E1122 07:36:44.321391 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:prometheus,Image:registry.redhat.io/cluster-observability-operator/prometheus-rhel9@sha256:17ea20be390a94ab39f5cdd7f0cbc2498046eebcf77fe3dec9aa288d5c2cf46b,Command:[],Args:[--config.file=/etc/prometheus/config_out/prometheus.env.yaml --web.enable-lifecycle --web.enable-remote-write-receiver --web.route-prefix=/ --storage.tsdb.retention.time=24h --storage.tsdb.path=/prometheus --web.config.file=/etc/prometheus/web_config/web-config.yaml],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:web,HostPort:0,ContainerPort:9090,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-out,ReadOnly:true,MountPath:/etc/prometheus/config_out,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:tls-assets,ReadOnly:true,MountPath:/etc/prometheus/certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:prometheus-metric-storage-db,ReadOnly:false,MountPath:/prometheus,SubPath:prometheus-db,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:prometheus-metric-storage-rulefiles-0,ReadOnly:false,MountPath:/etc/prometheus/rules/prometheus-metric-storage-rulefiles-0,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:web-config,ReadOnly:true,MountPath:/etc/prometheus/web_config/web-config.yaml,SubPath:web-config.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f8w4x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/-/healthy,Port:{1 0 web},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:3,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:6,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/-/ready,Port:{1 0 web},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:3,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/-/ready,Port:{1 0 web},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:3,PeriodSeconds:15,SuccessThreshold:1,FailureThreshold:60,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod prometheus-metric-storage-0_openstack(fad488a3-81d2-42c9-9140-9ca981927e13): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 22 07:36:46 crc kubenswrapper[4929]: I1122 07:36:46.769754 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output="command timed out"
Nov 22 07:36:46 crc kubenswrapper[4929]: I1122 07:36:46.773022 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output="command timed out"
Nov 22 07:36:51 crc kubenswrapper[4929]: I1122 07:36:51.771349 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output="command timed out"
Nov 22 07:36:51 crc kubenswrapper[4929]: I1122 07:36:51.777854 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output="command timed out"
Nov 22 07:36:56 crc kubenswrapper[4929]: I1122 07:36:56.313960 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0"
Nov 22 07:36:56 crc kubenswrapper[4929]: E1122 07:36:56.314192 4929 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 22 07:36:56 crc kubenswrapper[4929]: E1122 07:36:56.314633 4929 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 22 07:36:56 crc kubenswrapper[4929]: E1122 07:36:56.314689 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift podName:dab37299-3b8e-46d0-b6a5-044f7d4878d6 nodeName:}" failed. No retries permitted until 2025-11-22 07:37:28.314670447 +0000 UTC m=+1585.424124460 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift") pod "swift-storage-0" (UID: "dab37299-3b8e-46d0-b6a5-044f7d4878d6") : configmap "swift-ring-files" not found
Nov 22 07:36:56 crc kubenswrapper[4929]: I1122 07:36:56.774038 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output="command timed out"
Nov 22 07:36:56 crc kubenswrapper[4929]: I1122 07:36:56.774715 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output="command timed out"
Nov 22 07:36:56 crc kubenswrapper[4929]: I1122 07:36:56.774789 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/ovn-controller-6p98g"
Nov 22 07:36:56 crc kubenswrapper[4929]: I1122 07:36:56.782271 4929 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="ovn-controller" containerStatusID={"Type":"cri-o","ID":"fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1"} pod="openstack/ovn-controller-6p98g" containerMessage="Container ovn-controller failed liveness probe, will be restarted"
Nov 22 07:36:58 crc kubenswrapper[4929]: I1122 07:36:58.677641 4929 generic.go:334] "Generic (PLEG): container finished" podID="76073571-729d-4de5-bda6-780d28ae6a9b" containerID="26f4f1a367c34712fefb9c0779fd5560b01e11b0e5ba8c53305f067f7653aeb3" exitCode=0
Nov 22 07:36:58 crc kubenswrapper[4929]: I1122 07:36:58.677748 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"76073571-729d-4de5-bda6-780d28ae6a9b","Type":"ContainerDied","Data":"26f4f1a367c34712fefb9c0779fd5560b01e11b0e5ba8c53305f067f7653aeb3"}
Nov 22 07:36:58 crc kubenswrapper[4929]: I1122 07:36:58.680362 4929 generic.go:334] "Generic (PLEG): container finished" podID="941ef061-1085-45ca-84e2-60447bb10c47" containerID="538735a5e705cdc86d5bc3adf7665f0bdb3fcc025481cc42b1437f9732199404" exitCode=0
Nov 22 07:36:58 crc kubenswrapper[4929]: I1122 07:36:58.680397 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"941ef061-1085-45ca-84e2-60447bb10c47","Type":"ContainerDied","Data":"538735a5e705cdc86d5bc3adf7665f0bdb3fcc025481cc42b1437f9732199404"}
Nov 22 07:37:00 crc kubenswrapper[4929]: E1122 07:37:00.903825 4929 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=<
Nov 22 07:37:00 crc kubenswrapper[4929]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2025-11-22T07:36:57Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock)
Nov 22 07:37:00 crc kubenswrapper[4929]: 2025-11-22T07:36:58Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock)
Nov 22 07:37:00 crc kubenswrapper[4929]: /etc/init.d/functions: line 589: 46 Alarm clock "$@"
Nov 22 07:37:00 crc kubenswrapper[4929]: > execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-6p98g" message=<
Nov 22 07:37:00 crc kubenswrapper[4929]: Exiting ovn-controller (1) [FAILED]
Nov 22 07:37:00 crc kubenswrapper[4929]: Killing ovn-controller (1) [ OK ]
Nov 22 07:37:00 crc kubenswrapper[4929]: 2025-11-22T07:36:57Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock)
Nov 22 07:37:00 crc kubenswrapper[4929]: 2025-11-22T07:36:58Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock)
Nov 22 07:37:00 crc kubenswrapper[4929]: /etc/init.d/functions: line 589: 46 Alarm clock "$@"
Nov 22 07:37:00 crc kubenswrapper[4929]: >
Nov 22 07:37:00 crc kubenswrapper[4929]: E1122 07:37:00.904316 4929 kuberuntime_container.go:691] "PreStop hook failed" err=<
Nov 22 07:37:00 crc kubenswrapper[4929]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2025-11-22T07:36:57Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock)
Nov 22 07:37:00 crc kubenswrapper[4929]: 2025-11-22T07:36:58Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock)
Nov 22 07:37:00 crc kubenswrapper[4929]: /etc/init.d/functions: line 589: 46 Alarm clock "$@"
Nov 22 07:37:00 crc kubenswrapper[4929]: > pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" containerID="cri-o://fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1"
Nov 22 07:37:00 crc kubenswrapper[4929]: I1122 07:37:00.904374 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" containerID="cri-o://fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1" gracePeriod=26
Nov 22 07:37:00 crc kubenswrapper[4929]: I1122 07:37:00.904865 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=""
Nov 22 07:37:00 crc kubenswrapper[4929]: E1122 07:37:00.905174 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1 is running failed: container process not found" containerID="fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"]
Nov 22 07:37:00 crc kubenswrapper[4929]: E1122 07:37:00.905410 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1 is running failed: container process not found" containerID="fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"]
Nov 22 07:37:00 crc kubenswrapper[4929]: E1122 07:37:00.905709 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1 is running failed: container process not found" containerID="fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"]
pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" Nov 22 07:37:01 crc kubenswrapper[4929]: E1122 07:37:01.902076 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1 is running failed: container process not found" containerID="fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 22 07:37:01 crc kubenswrapper[4929]: E1122 07:37:01.902694 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1 is running failed: container process not found" containerID="fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 22 07:37:01 crc kubenswrapper[4929]: E1122 07:37:01.903441 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1 is running failed: container process not found" containerID="fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 22 07:37:01 crc kubenswrapper[4929]: E1122 07:37:01.903473 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" Nov 22 07:37:03 crc kubenswrapper[4929]: I1122 07:37:03.727291 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-6p98g_a17264d7-e93b-4448-9ed8-0be507a9120f/ovn-controller/0.log" Nov 22 07:37:03 crc kubenswrapper[4929]: I1122 07:37:03.727352 4929 generic.go:334] "Generic (PLEG): container finished" podID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerID="fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1" exitCode=143 Nov 22 07:37:03 crc kubenswrapper[4929]: I1122 07:37:03.727386 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6p98g" event={"ID":"a17264d7-e93b-4448-9ed8-0be507a9120f","Type":"ContainerDied","Data":"fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1"} Nov 22 07:37:06 crc kubenswrapper[4929]: E1122 07:37:06.902145 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1 is running failed: container process not found" containerID="fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 22 07:37:06 crc kubenswrapper[4929]: E1122 07:37:06.903342 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1 is running failed: 
container process not found" containerID="fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 22 07:37:06 crc kubenswrapper[4929]: E1122 07:37:06.903908 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1 is running failed: container process not found" containerID="fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 22 07:37:06 crc kubenswrapper[4929]: E1122 07:37:06.903995 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" Nov 22 07:37:11 crc kubenswrapper[4929]: I1122 07:37:11.623155 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bhx9f"] Nov 22 07:37:11 crc kubenswrapper[4929]: I1122 07:37:11.625969 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bhx9f" Nov 22 07:37:11 crc kubenswrapper[4929]: I1122 07:37:11.640100 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bhx9f"] Nov 22 07:37:11 crc kubenswrapper[4929]: I1122 07:37:11.798538 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1515626b-94a1-4527-8129-14fe7afaf988-utilities\") pod \"community-operators-bhx9f\" (UID: \"1515626b-94a1-4527-8129-14fe7afaf988\") " pod="openshift-marketplace/community-operators-bhx9f" Nov 22 07:37:11 crc kubenswrapper[4929]: I1122 07:37:11.799007 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1515626b-94a1-4527-8129-14fe7afaf988-catalog-content\") pod \"community-operators-bhx9f\" (UID: \"1515626b-94a1-4527-8129-14fe7afaf988\") " pod="openshift-marketplace/community-operators-bhx9f" Nov 22 07:37:11 crc kubenswrapper[4929]: I1122 07:37:11.799700 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fd44s\" (UniqueName: \"kubernetes.io/projected/1515626b-94a1-4527-8129-14fe7afaf988-kube-api-access-fd44s\") pod \"community-operators-bhx9f\" (UID: \"1515626b-94a1-4527-8129-14fe7afaf988\") " pod="openshift-marketplace/community-operators-bhx9f" Nov 22 07:37:11 crc kubenswrapper[4929]: E1122 07:37:11.901229 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1 is running failed: container process not found" containerID="fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 22 07:37:11 crc kubenswrapper[4929]: I1122 07:37:11.901585 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/1515626b-94a1-4527-8129-14fe7afaf988-catalog-content\") pod \"community-operators-bhx9f\" (UID: \"1515626b-94a1-4527-8129-14fe7afaf988\") " pod="openshift-marketplace/community-operators-bhx9f" Nov 22 07:37:11 crc kubenswrapper[4929]: I1122 07:37:11.901699 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fd44s\" (UniqueName: \"kubernetes.io/projected/1515626b-94a1-4527-8129-14fe7afaf988-kube-api-access-fd44s\") pod \"community-operators-bhx9f\" (UID: \"1515626b-94a1-4527-8129-14fe7afaf988\") " pod="openshift-marketplace/community-operators-bhx9f" Nov 22 07:37:11 crc kubenswrapper[4929]: I1122 07:37:11.901807 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1515626b-94a1-4527-8129-14fe7afaf988-utilities\") pod \"community-operators-bhx9f\" (UID: \"1515626b-94a1-4527-8129-14fe7afaf988\") " pod="openshift-marketplace/community-operators-bhx9f" Nov 22 07:37:11 crc kubenswrapper[4929]: I1122 07:37:11.902484 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1515626b-94a1-4527-8129-14fe7afaf988-utilities\") pod \"community-operators-bhx9f\" (UID: \"1515626b-94a1-4527-8129-14fe7afaf988\") " pod="openshift-marketplace/community-operators-bhx9f" Nov 22 07:37:11 crc kubenswrapper[4929]: I1122 07:37:11.902609 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1515626b-94a1-4527-8129-14fe7afaf988-catalog-content\") pod \"community-operators-bhx9f\" (UID: \"1515626b-94a1-4527-8129-14fe7afaf988\") " pod="openshift-marketplace/community-operators-bhx9f" Nov 22 07:37:11 crc kubenswrapper[4929]: E1122 07:37:11.902798 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1 is running failed: container process not found" containerID="fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 22 07:37:11 crc kubenswrapper[4929]: E1122 07:37:11.903439 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1 is running failed: container process not found" containerID="fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 22 07:37:11 crc kubenswrapper[4929]: E1122 07:37:11.903501 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" Nov 22 07:37:11 crc kubenswrapper[4929]: I1122 07:37:11.931418 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fd44s\" (UniqueName: \"kubernetes.io/projected/1515626b-94a1-4527-8129-14fe7afaf988-kube-api-access-fd44s\") pod \"community-operators-bhx9f\" (UID: \"1515626b-94a1-4527-8129-14fe7afaf988\") " 
pod="openshift-marketplace/community-operators-bhx9f" Nov 22 07:37:11 crc kubenswrapper[4929]: I1122 07:37:11.963606 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bhx9f" Nov 22 07:37:12 crc kubenswrapper[4929]: E1122 07:37:12.040911 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad" Nov 22 07:37:12 crc kubenswrapper[4929]: E1122 07:37:12.042192 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:registry-server,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad,Command:[/bin/opm],Args:[serve /extracted-catalog/catalog --cache-dir=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:grpc,HostPort:0,ContainerPort:50051,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:GOMEMLIMIT,Value:30MiB,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{31457280 0} {} 30Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tm2nh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:10,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-zv95t_openshift-marketplace(2584eaad-5c38-40d2-b1da-7a6268080fd0): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:37:12 crc kubenswrapper[4929]: E1122 07:37:12.044449 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"registry-server\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openshift-marketplace/redhat-operators-zv95t" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" Nov 22 07:37:13 crc kubenswrapper[4929]: E1122 07:37:13.953371 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad" Nov 22 07:37:13 crc kubenswrapper[4929]: E1122 07:37:13.955032 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:registry-server,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad,Command:[/bin/opm],Args:[serve /extracted-catalog/catalog --cache-dir=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:grpc,HostPort:0,ContainerPort:50051,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:GOMEMLIMIT,Value:20MiB,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{20971520 0} {} 20Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8k8tz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:10,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-cv5dp_openshift-marketplace(91c4ac46-cc90-4399-9ef0-45c4b010ff70): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:37:13 crc kubenswrapper[4929]: E1122 07:37:13.957290 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"registry-server\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-cv5dp" podUID="91c4ac46-cc90-4399-9ef0-45c4b010ff70" Nov 22 07:37:14 crc kubenswrapper[4929]: I1122 
07:37:14.400447 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 22 07:37:14 crc kubenswrapper[4929]: W1122 07:37:14.443878 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2d73bc22_a412_491f_9484_71864027c02f.slice/crio-c950237ec166747b9c82d95c60370e7fd4814fa50a047337b14ac8c77a68f8b2 WatchSource:0}: Error finding container c950237ec166747b9c82d95c60370e7fd4814fa50a047337b14ac8c77a68f8b2: Status 404 returned error can't find the container with id c950237ec166747b9c82d95c60370e7fd4814fa50a047337b14ac8c77a68f8b2 Nov 22 07:37:14 crc kubenswrapper[4929]: I1122 07:37:14.486796 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bhx9f"] Nov 22 07:37:14 crc kubenswrapper[4929]: W1122 07:37:14.500615 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1515626b_94a1_4527_8129_14fe7afaf988.slice/crio-24d779931936e8a91ee55104e36d5270490435acfbe20735bcdd9d048e29fc78 WatchSource:0}: Error finding container 24d779931936e8a91ee55104e36d5270490435acfbe20735bcdd9d048e29fc78: Status 404 returned error can't find the container with id 24d779931936e8a91ee55104e36d5270490435acfbe20735bcdd9d048e29fc78 Nov 22 07:37:14 crc kubenswrapper[4929]: I1122 07:37:14.821113 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bhx9f" event={"ID":"1515626b-94a1-4527-8129-14fe7afaf988","Type":"ContainerStarted","Data":"24d779931936e8a91ee55104e36d5270490435acfbe20735bcdd9d048e29fc78"} Nov 22 07:37:14 crc kubenswrapper[4929]: I1122 07:37:14.823780 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"2d73bc22-a412-491f-9484-71864027c02f","Type":"ContainerStarted","Data":"c950237ec166747b9c82d95c60370e7fd4814fa50a047337b14ac8c77a68f8b2"} Nov 22 07:37:15 crc kubenswrapper[4929]: I1122 07:37:15.830944 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-6p98g_a17264d7-e93b-4448-9ed8-0be507a9120f/ovn-controller/0.log" Nov 22 07:37:15 crc kubenswrapper[4929]: I1122 07:37:15.831291 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6p98g" event={"ID":"a17264d7-e93b-4448-9ed8-0be507a9120f","Type":"ContainerStarted","Data":"bb5198c15aa0faef912f84a1a47bfeccbd288dbfbad69ce2482d517e2abc4ffd"} Nov 22 07:37:15 crc kubenswrapper[4929]: I1122 07:37:15.833659 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"941ef061-1085-45ca-84e2-60447bb10c47","Type":"ContainerStarted","Data":"3618100ebde033f5cc3cac185127304b7773a34898841e8947815715eb0b4177"} Nov 22 07:37:15 crc kubenswrapper[4929]: I1122 07:37:15.835403 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"76073571-729d-4de5-bda6-780d28ae6a9b","Type":"ContainerStarted","Data":"71bd6d971865ec36fa98e67f5e55669eaa860448f2105164efefaa8dc7f0923e"} Nov 22 07:37:15 crc kubenswrapper[4929]: I1122 07:37:15.838496 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-zg7gb" event={"ID":"3d1bd063-db18-4012-85a9-fc270e6d5782","Type":"ContainerStarted","Data":"dda67dbf58cd6aef06969937f8f65ded435aba5e9af21ebf9d55c32e5ee7af24"} Nov 22 07:37:16 crc kubenswrapper[4929]: I1122 07:37:16.848224 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-controller-ovs-gglj6" event={"ID":"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8","Type":"ContainerStarted","Data":"eba346ffb7319ff22a54f1875df6f566fd087f540944f59aaf8cf5fdb4b5777b"} Nov 22 07:37:16 crc kubenswrapper[4929]: I1122 07:37:16.849546 4929 generic.go:334] "Generic (PLEG): container finished" podID="1515626b-94a1-4527-8129-14fe7afaf988" containerID="ec8c625aa129a5690c793bb86d43de580f1aa49b92681e78ccf5df821dd97998" exitCode=0 Nov 22 07:37:16 crc kubenswrapper[4929]: I1122 07:37:16.849592 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bhx9f" event={"ID":"1515626b-94a1-4527-8129-14fe7afaf988","Type":"ContainerDied","Data":"ec8c625aa129a5690c793bb86d43de580f1aa49b92681e78ccf5df821dd97998"} Nov 22 07:37:16 crc kubenswrapper[4929]: I1122 07:37:16.851046 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"18c31ba0-cf68-45bf-87da-d04ab3bd8b21","Type":"ContainerStarted","Data":"62d3aa065282e07332a4dfb81e01b075ecde2a4d56c10e06cf2c5f6c411f4ec6"} Nov 22 07:37:16 crc kubenswrapper[4929]: I1122 07:37:16.851350 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:37:16 crc kubenswrapper[4929]: I1122 07:37:16.876008 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-zg7gb" podStartSLOduration=53.875990422 podStartE2EDuration="53.875990422s" podCreationTimestamp="2025-11-22 07:36:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:37:16.873017669 +0000 UTC m=+1573.982471702" watchObservedRunningTime="2025-11-22 07:37:16.875990422 +0000 UTC m=+1573.985444435" Nov 22 07:37:16 crc kubenswrapper[4929]: E1122 07:37:16.902129 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1 is running failed: container process not found" containerID="fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 22 07:37:16 crc kubenswrapper[4929]: E1122 07:37:16.902577 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1 is running failed: container process not found" containerID="fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 22 07:37:16 crc kubenswrapper[4929]: E1122 07:37:16.902837 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1 is running failed: container process not found" containerID="fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 22 07:37:16 crc kubenswrapper[4929]: E1122 07:37:16.902883 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of fc82394b87bb914994d1fcbdb7c206dda8c4571deea02e536ef8c41dec029ff1 is running failed: container 
process not found" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" Nov 22 07:37:17 crc kubenswrapper[4929]: I1122 07:37:17.858623 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-6p98g" Nov 22 07:37:17 crc kubenswrapper[4929]: I1122 07:37:17.858681 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 22 07:37:17 crc kubenswrapper[4929]: I1122 07:37:17.902882 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=85.474007465 podStartE2EDuration="3m50.902862271s" podCreationTimestamp="2025-11-22 07:33:27 +0000 UTC" firstStartedPulling="2025-11-22 07:33:55.431162439 +0000 UTC m=+1372.540616493" lastFinishedPulling="2025-11-22 07:36:20.860017286 +0000 UTC m=+1517.969471299" observedRunningTime="2025-11-22 07:37:17.900048391 +0000 UTC m=+1575.009502434" watchObservedRunningTime="2025-11-22 07:37:17.902862271 +0000 UTC m=+1575.012316284" Nov 22 07:37:17 crc kubenswrapper[4929]: I1122 07:37:17.922741 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=86.698254434 podStartE2EDuration="3m51.92272104s" podCreationTimestamp="2025-11-22 07:33:26 +0000 UTC" firstStartedPulling="2025-11-22 07:33:54.944458255 +0000 UTC m=+1372.053912268" lastFinishedPulling="2025-11-22 07:36:20.168924821 +0000 UTC m=+1517.278378874" observedRunningTime="2025-11-22 07:37:17.919102251 +0000 UTC m=+1575.028556284" watchObservedRunningTime="2025-11-22 07:37:17.92272104 +0000 UTC m=+1575.032175073" Nov 22 07:37:18 crc kubenswrapper[4929]: I1122 07:37:18.459804 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 22 07:37:18 crc kubenswrapper[4929]: I1122 07:37:18.902663 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=28.374591101 podStartE2EDuration="3m46.902644181s" podCreationTimestamp="2025-11-22 07:33:32 +0000 UTC" firstStartedPulling="2025-11-22 07:33:55.521833809 +0000 UTC m=+1372.631287822" lastFinishedPulling="2025-11-22 07:37:14.049886899 +0000 UTC m=+1571.159340902" observedRunningTime="2025-11-22 07:37:18.900436526 +0000 UTC m=+1576.009890539" watchObservedRunningTime="2025-11-22 07:37:18.902644181 +0000 UTC m=+1576.012098194" Nov 22 07:37:23 crc kubenswrapper[4929]: I1122 07:37:23.321819 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 22 07:37:23 crc kubenswrapper[4929]: I1122 07:37:23.327206 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 22 07:37:23 crc kubenswrapper[4929]: I1122 07:37:23.642453 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:37:23 crc kubenswrapper[4929]: I1122 07:37:23.713139 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-pk7mc"] Nov 22 07:37:23 crc kubenswrapper[4929]: I1122 07:37:23.713493 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc" podUID="f4a5840d-58f8-4312-854c-b927be25fc3f" containerName="dnsmasq-dns" 
containerID="cri-o://36222877e144232ecdd692b0b7de2d92d9e0f9677a1af113741cc346834fe9f3" gracePeriod=10 Nov 22 07:37:23 crc kubenswrapper[4929]: I1122 07:37:23.787398 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-n8qwb"] Nov 22 07:37:23 crc kubenswrapper[4929]: I1122 07:37:23.789769 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n8qwb" Nov 22 07:37:23 crc kubenswrapper[4929]: I1122 07:37:23.799782 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n8qwb"] Nov 22 07:37:23 crc kubenswrapper[4929]: I1122 07:37:23.910059 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fad488a3-81d2-42c9-9140-9ca981927e13","Type":"ContainerStarted","Data":"68b98099fc432be4259fb71ff85ba2185f0ceeac47d7364cb0ab58369a1fe9b8"} Nov 22 07:37:23 crc kubenswrapper[4929]: I1122 07:37:23.912877 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8b580fe-2477-455b-bdaa-b59a1d93c986-utilities\") pod \"certified-operators-n8qwb\" (UID: \"f8b580fe-2477-455b-bdaa-b59a1d93c986\") " pod="openshift-marketplace/certified-operators-n8qwb" Nov 22 07:37:23 crc kubenswrapper[4929]: I1122 07:37:23.913112 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84lw7\" (UniqueName: \"kubernetes.io/projected/f8b580fe-2477-455b-bdaa-b59a1d93c986-kube-api-access-84lw7\") pod \"certified-operators-n8qwb\" (UID: \"f8b580fe-2477-455b-bdaa-b59a1d93c986\") " pod="openshift-marketplace/certified-operators-n8qwb" Nov 22 07:37:23 crc kubenswrapper[4929]: I1122 07:37:23.913159 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8b580fe-2477-455b-bdaa-b59a1d93c986-catalog-content\") pod \"certified-operators-n8qwb\" (UID: \"f8b580fe-2477-455b-bdaa-b59a1d93c986\") " pod="openshift-marketplace/certified-operators-n8qwb" Nov 22 07:37:24 crc kubenswrapper[4929]: I1122 07:37:24.014948 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8b580fe-2477-455b-bdaa-b59a1d93c986-utilities\") pod \"certified-operators-n8qwb\" (UID: \"f8b580fe-2477-455b-bdaa-b59a1d93c986\") " pod="openshift-marketplace/certified-operators-n8qwb" Nov 22 07:37:24 crc kubenswrapper[4929]: I1122 07:37:24.015343 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84lw7\" (UniqueName: \"kubernetes.io/projected/f8b580fe-2477-455b-bdaa-b59a1d93c986-kube-api-access-84lw7\") pod \"certified-operators-n8qwb\" (UID: \"f8b580fe-2477-455b-bdaa-b59a1d93c986\") " pod="openshift-marketplace/certified-operators-n8qwb" Nov 22 07:37:24 crc kubenswrapper[4929]: I1122 07:37:24.015473 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8b580fe-2477-455b-bdaa-b59a1d93c986-utilities\") pod \"certified-operators-n8qwb\" (UID: \"f8b580fe-2477-455b-bdaa-b59a1d93c986\") " pod="openshift-marketplace/certified-operators-n8qwb" Nov 22 07:37:24 crc kubenswrapper[4929]: I1122 07:37:24.015578 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/f8b580fe-2477-455b-bdaa-b59a1d93c986-catalog-content\") pod \"certified-operators-n8qwb\" (UID: \"f8b580fe-2477-455b-bdaa-b59a1d93c986\") " pod="openshift-marketplace/certified-operators-n8qwb" Nov 22 07:37:24 crc kubenswrapper[4929]: I1122 07:37:24.015908 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8b580fe-2477-455b-bdaa-b59a1d93c986-catalog-content\") pod \"certified-operators-n8qwb\" (UID: \"f8b580fe-2477-455b-bdaa-b59a1d93c986\") " pod="openshift-marketplace/certified-operators-n8qwb" Nov 22 07:37:24 crc kubenswrapper[4929]: I1122 07:37:24.036944 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84lw7\" (UniqueName: \"kubernetes.io/projected/f8b580fe-2477-455b-bdaa-b59a1d93c986-kube-api-access-84lw7\") pod \"certified-operators-n8qwb\" (UID: \"f8b580fe-2477-455b-bdaa-b59a1d93c986\") " pod="openshift-marketplace/certified-operators-n8qwb" Nov 22 07:37:24 crc kubenswrapper[4929]: I1122 07:37:24.118952 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n8qwb" Nov 22 07:37:24 crc kubenswrapper[4929]: I1122 07:37:24.466361 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc" podUID="f4a5840d-58f8-4312-854c-b927be25fc3f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.118:5353: connect: connection refused" Nov 22 07:37:28 crc kubenswrapper[4929]: I1122 07:37:28.186089 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="76073571-729d-4de5-bda6-780d28ae6a9b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused" Nov 22 07:37:28 crc kubenswrapper[4929]: I1122 07:37:28.340632 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:37:28 crc kubenswrapper[4929]: E1122 07:37:28.340839 4929 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 22 07:37:28 crc kubenswrapper[4929]: E1122 07:37:28.341037 4929 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 22 07:37:28 crc kubenswrapper[4929]: E1122 07:37:28.341098 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift podName:dab37299-3b8e-46d0-b6a5-044f7d4878d6 nodeName:}" failed. No retries permitted until 2025-11-22 07:38:32.341081352 +0000 UTC m=+1649.450535355 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift") pod "swift-storage-0" (UID: "dab37299-3b8e-46d0-b6a5-044f7d4878d6") : configmap "swift-ring-files" not found Nov 22 07:37:28 crc kubenswrapper[4929]: I1122 07:37:28.461476 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="941ef061-1085-45ca-84e2-60447bb10c47" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused" Nov 22 07:37:29 crc kubenswrapper[4929]: I1122 07:37:29.466202 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc" podUID="f4a5840d-58f8-4312-854c-b927be25fc3f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.118:5353: connect: connection refused" Nov 22 07:37:30 crc kubenswrapper[4929]: I1122 07:37:30.985383 4929 generic.go:334] "Generic (PLEG): container finished" podID="f4a5840d-58f8-4312-854c-b927be25fc3f" containerID="36222877e144232ecdd692b0b7de2d92d9e0f9677a1af113741cc346834fe9f3" exitCode=0 Nov 22 07:37:30 crc kubenswrapper[4929]: I1122 07:37:30.985482 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc" event={"ID":"f4a5840d-58f8-4312-854c-b927be25fc3f","Type":"ContainerDied","Data":"36222877e144232ecdd692b0b7de2d92d9e0f9677a1af113741cc346834fe9f3"} Nov 22 07:37:34 crc kubenswrapper[4929]: I1122 07:37:34.466351 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc" podUID="f4a5840d-58f8-4312-854c-b927be25fc3f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.118:5353: connect: connection refused" Nov 22 07:37:34 crc kubenswrapper[4929]: I1122 07:37:34.467078 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc" Nov 22 07:37:38 crc kubenswrapper[4929]: I1122 07:37:38.184779 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="76073571-729d-4de5-bda6-780d28ae6a9b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused" Nov 22 07:37:38 crc kubenswrapper[4929]: I1122 07:37:38.460465 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="941ef061-1085-45ca-84e2-60447bb10c47" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused" Nov 22 07:37:39 crc kubenswrapper[4929]: I1122 07:37:39.466715 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc" podUID="f4a5840d-58f8-4312-854c-b927be25fc3f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.118:5353: connect: connection refused" Nov 22 07:37:46 crc kubenswrapper[4929]: I1122 07:37:46.953132 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=< Nov 22 07:37:46 crc kubenswrapper[4929]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 22 07:37:46 crc kubenswrapper[4929]: > Nov 22 07:37:48 crc kubenswrapper[4929]: I1122 07:37:48.184600 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" 
podUID="76073571-729d-4de5-bda6-780d28ae6a9b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused" Nov 22 07:37:48 crc kubenswrapper[4929]: I1122 07:37:48.461019 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="941ef061-1085-45ca-84e2-60447bb10c47" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused" Nov 22 07:37:49 crc kubenswrapper[4929]: E1122 07:37:49.137094 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified" Nov 22 07:37:49 crc kubenswrapper[4929]: E1122 07:37:49.137399 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:swift-ring-rebalance,Image:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,Command:[/usr/local/bin/swift-ring-tool all],Args:[],WorkingDir:/etc/swift,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CM_NAME,Value:swift-ring-files,ValueFrom:nil,},EnvVar{Name:NAMESPACE,Value:openstack,ValueFrom:nil,},EnvVar{Name:OWNER_APIVERSION,Value:swift.openstack.org/v1beta1,ValueFrom:nil,},EnvVar{Name:OWNER_KIND,Value:SwiftRing,ValueFrom:nil,},EnvVar{Name:OWNER_NAME,Value:swift-ring,ValueFrom:nil,},EnvVar{Name:OWNER_UID,Value:b3343e2d-4dac-4fca-9529-0de78ad448bf,ValueFrom:nil,},EnvVar{Name:SWIFT_MIN_PART_HOURS,Value:1,ValueFrom:nil,},EnvVar{Name:SWIFT_PART_POWER,Value:10,ValueFrom:nil,},EnvVar{Name:SWIFT_REPLICAS,Value:1,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/swift-ring-tool,SubPath:swift-ring-tool,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:swiftconf,ReadOnly:true,MountPath:/etc/swift/swift.conf,SubPath:swift.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:etc-swift,ReadOnly:false,MountPath:/etc/swift,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ring-data-devices,ReadOnly:true,MountPath:/var/lib/config-data/ring-devices,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dispersionconf,ReadOnly:true,MountPath:/etc/swift/dispersion.conf,SubPath:dispersion.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9s5rh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42445,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:n
il,} start failed in pod swift-ring-rebalance-bbxj7_openstack(4c640fe8-4583-4162-949d-4508edaca274): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:37:49 crc kubenswrapper[4929]: E1122 07:37:49.138748 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"swift-ring-rebalance\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/swift-ring-rebalance-bbxj7" podUID="4c640fe8-4583-4162-949d-4508edaca274" Nov 22 07:37:49 crc kubenswrapper[4929]: I1122 07:37:49.179968 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc" event={"ID":"f4a5840d-58f8-4312-854c-b927be25fc3f","Type":"ContainerDied","Data":"14fc8f52a0213d43f55c77c0a9aea9ce21e5fbe59af7ee5be4a9dd30e18f48ac"} Nov 22 07:37:49 crc kubenswrapper[4929]: I1122 07:37:49.180488 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="14fc8f52a0213d43f55c77c0a9aea9ce21e5fbe59af7ee5be4a9dd30e18f48ac" Nov 22 07:37:49 crc kubenswrapper[4929]: I1122 07:37:49.244793 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc" Nov 22 07:37:49 crc kubenswrapper[4929]: I1122 07:37:49.333115 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-dns-svc\") pod \"f4a5840d-58f8-4312-854c-b927be25fc3f\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " Nov 22 07:37:49 crc kubenswrapper[4929]: I1122 07:37:49.333199 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlvnl\" (UniqueName: \"kubernetes.io/projected/f4a5840d-58f8-4312-854c-b927be25fc3f-kube-api-access-hlvnl\") pod \"f4a5840d-58f8-4312-854c-b927be25fc3f\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " Nov 22 07:37:49 crc kubenswrapper[4929]: I1122 07:37:49.333237 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-config\") pod \"f4a5840d-58f8-4312-854c-b927be25fc3f\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " Nov 22 07:37:49 crc kubenswrapper[4929]: I1122 07:37:49.333291 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-ovsdbserver-sb\") pod \"f4a5840d-58f8-4312-854c-b927be25fc3f\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " Nov 22 07:37:49 crc kubenswrapper[4929]: I1122 07:37:49.333321 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-ovsdbserver-nb\") pod \"f4a5840d-58f8-4312-854c-b927be25fc3f\" (UID: \"f4a5840d-58f8-4312-854c-b927be25fc3f\") " Nov 22 07:37:49 crc kubenswrapper[4929]: I1122 07:37:49.343477 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4a5840d-58f8-4312-854c-b927be25fc3f-kube-api-access-hlvnl" (OuterVolumeSpecName: "kube-api-access-hlvnl") pod "f4a5840d-58f8-4312-854c-b927be25fc3f" (UID: "f4a5840d-58f8-4312-854c-b927be25fc3f"). InnerVolumeSpecName "kube-api-access-hlvnl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:37:49 crc kubenswrapper[4929]: I1122 07:37:49.372244 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f4a5840d-58f8-4312-854c-b927be25fc3f" (UID: "f4a5840d-58f8-4312-854c-b927be25fc3f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:37:49 crc kubenswrapper[4929]: I1122 07:37:49.373619 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-config" (OuterVolumeSpecName: "config") pod "f4a5840d-58f8-4312-854c-b927be25fc3f" (UID: "f4a5840d-58f8-4312-854c-b927be25fc3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:37:49 crc kubenswrapper[4929]: I1122 07:37:49.376689 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f4a5840d-58f8-4312-854c-b927be25fc3f" (UID: "f4a5840d-58f8-4312-854c-b927be25fc3f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:37:49 crc kubenswrapper[4929]: I1122 07:37:49.382498 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f4a5840d-58f8-4312-854c-b927be25fc3f" (UID: "f4a5840d-58f8-4312-854c-b927be25fc3f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:37:49 crc kubenswrapper[4929]: I1122 07:37:49.436809 4929 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 07:37:49 crc kubenswrapper[4929]: I1122 07:37:49.437113 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlvnl\" (UniqueName: \"kubernetes.io/projected/f4a5840d-58f8-4312-854c-b927be25fc3f-kube-api-access-hlvnl\") on node \"crc\" DevicePath \"\"" Nov 22 07:37:49 crc kubenswrapper[4929]: I1122 07:37:49.437191 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:37:49 crc kubenswrapper[4929]: I1122 07:37:49.437313 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 07:37:49 crc kubenswrapper[4929]: I1122 07:37:49.437386 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4a5840d-58f8-4312-854c-b927be25fc3f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 07:37:49 crc kubenswrapper[4929]: I1122 07:37:49.466611 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc" podUID="f4a5840d-58f8-4312-854c-b927be25fc3f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.118:5353: i/o timeout" Nov 22 07:37:49 crc kubenswrapper[4929]: E1122 07:37:49.738497 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"swift-ring-rebalance\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified\\\"\"" pod="openstack/swift-ring-rebalance-bbxj7" podUID="4c640fe8-4583-4162-949d-4508edaca274" Nov 22 07:37:50 crc kubenswrapper[4929]: I1122 07:37:50.190737 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-pk7mc" Nov 22 07:37:50 crc kubenswrapper[4929]: I1122 07:37:50.211483 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-pk7mc"] Nov 22 07:37:50 crc kubenswrapper[4929]: I1122 07:37:50.217265 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-pk7mc"] Nov 22 07:37:51 crc kubenswrapper[4929]: I1122 07:37:51.960582 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=< Nov 22 07:37:51 crc kubenswrapper[4929]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 22 07:37:51 crc kubenswrapper[4929]: > Nov 22 07:37:51 crc kubenswrapper[4929]: I1122 07:37:51.962394 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4a5840d-58f8-4312-854c-b927be25fc3f" path="/var/lib/kubelet/pods/f4a5840d-58f8-4312-854c-b927be25fc3f/volumes" Nov 22 07:37:52 crc kubenswrapper[4929]: I1122 07:37:52.216602 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-gglj6" event={"ID":"001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8","Type":"ContainerStarted","Data":"01a5d0c211b256f63b15a79d92ec57c4109341255464528253327021c87cdc55"} Nov 22 07:37:55 crc kubenswrapper[4929]: I1122 07:37:55.257506 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-gglj6" Nov 22 07:37:55 crc kubenswrapper[4929]: I1122 07:37:55.260151 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-gglj6" Nov 22 07:37:55 crc kubenswrapper[4929]: I1122 07:37:55.289475 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-gglj6" podStartSLOduration=119.753591456 podStartE2EDuration="4m19.289453276s" podCreationTimestamp="2025-11-22 07:33:36 +0000 UTC" firstStartedPulling="2025-11-22 07:33:57.024064214 +0000 UTC m=+1374.133518247" lastFinishedPulling="2025-11-22 07:36:16.559926054 +0000 UTC m=+1513.669380067" observedRunningTime="2025-11-22 07:37:55.282040344 +0000 UTC m=+1612.391494387" watchObservedRunningTime="2025-11-22 07:37:55.289453276 +0000 UTC m=+1612.398907299" Nov 22 07:37:55 crc kubenswrapper[4929]: I1122 07:37:55.315864 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-n8qwb"] Nov 22 07:37:55 crc kubenswrapper[4929]: I1122 07:37:55.325767 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-gglj6" Nov 22 07:37:56 crc kubenswrapper[4929]: I1122 07:37:56.970960 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=< Nov 22 07:37:56 crc kubenswrapper[4929]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 22 07:37:56 crc kubenswrapper[4929]: 
> Nov 22 07:37:58 crc kubenswrapper[4929]: I1122 07:37:58.184658 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="76073571-729d-4de5-bda6-780d28ae6a9b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused" Nov 22 07:37:58 crc kubenswrapper[4929]: I1122 07:37:58.459959 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="941ef061-1085-45ca-84e2-60447bb10c47" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused" Nov 22 07:38:01 crc kubenswrapper[4929]: I1122 07:38:01.953845 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=< Nov 22 07:38:01 crc kubenswrapper[4929]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 22 07:38:01 crc kubenswrapper[4929]: > Nov 22 07:38:06 crc kubenswrapper[4929]: I1122 07:38:06.958716 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=< Nov 22 07:38:06 crc kubenswrapper[4929]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 22 07:38:06 crc kubenswrapper[4929]: > Nov 22 07:38:08 crc kubenswrapper[4929]: I1122 07:38:08.187354 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="76073571-729d-4de5-bda6-780d28ae6a9b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused" Nov 22 07:38:08 crc kubenswrapper[4929]: I1122 07:38:08.460988 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="941ef061-1085-45ca-84e2-60447bb10c47" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused" Nov 22 07:38:11 crc kubenswrapper[4929]: I1122 07:38:11.936998 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=< Nov 22 07:38:11 crc kubenswrapper[4929]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 22 07:38:11 crc kubenswrapper[4929]: > Nov 22 07:38:16 crc kubenswrapper[4929]: I1122 07:38:16.949997 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=< Nov 22 07:38:16 crc kubenswrapper[4929]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 22 07:38:16 crc kubenswrapper[4929]: > Nov 22 07:38:18 crc kubenswrapper[4929]: I1122 07:38:18.185561 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="76073571-729d-4de5-bda6-780d28ae6a9b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused" Nov 22 07:38:18 crc kubenswrapper[4929]: I1122 07:38:18.460449 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="941ef061-1085-45ca-84e2-60447bb10c47" 
containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused" Nov 22 07:38:18 crc kubenswrapper[4929]: I1122 07:38:18.594843 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:38:18 crc kubenswrapper[4929]: I1122 07:38:18.594953 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:38:19 crc kubenswrapper[4929]: I1122 07:38:19.947545 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="76073571-729d-4de5-bda6-780d28ae6a9b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused" Nov 22 07:38:21 crc kubenswrapper[4929]: E1122 07:38:21.718456 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified" Nov 22 07:38:21 crc kubenswrapper[4929]: E1122 07:38:21.718905 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovn-northd,Image:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,Command:[/usr/bin/ovn-northd],Args:[-vfile:off -vconsole:info --n-threads=1 --ovnnb-db=ssl:ovsdbserver-nb-0.openstack.svc.cluster.local:6641 --ovnsb-db=ssl:ovsdbserver-sb-0.openstack.svc.cluster.local:6642 --certificate=/etc/pki/tls/certs/ovndb.crt --private-key=/etc/pki/tls/private/ovndb.key 
--ca-cert=/etc/pki/tls/certs/ovndbca.crt],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68bh57h646hfbh687h96h697h578h64bh64dhfh679h697h66bh695h679h59fh84hcbh9h8h695hd9hc6h64bhbfhc5h5bfh64h5b4h55dh59fq,ValueFrom:nil,},EnvVar{Name:OVN_RUNDIR,Value:/tmp,ValueFrom:nil,},EnvVar{Name:certs,Value:n58fh54hd6h55chbdh68dh564hf6h58h649h58fhcdhdfh68dh5d7h668h57dh76hdfh547h5cdh694h78h5ddh664h58dh55bh86h549h5bdhfch57cq,ValueFrom:nil,},EnvVar{Name:certs_metrics,Value:n595hcchf6h584hf8hcfh68fhcbh65bh688h658hcbh66bh659h576h699hb5h5c6h54dh66fh656h9ch654hbfh65fh74h5bbhb4h98hcbh54fh69q,ValueFrom:nil,},EnvVar{Name:ovnnorthd-config,Value:n5c8h7ch56bh8dh8hc4h5dch9dh68h6bhb7h598h549h5dbh66fh6bh5b4h5cch5d6h55ch57fhfch588h89h5ddh5d6h65bh65bh8dhc4h67dh569q,ValueFrom:nil,},EnvVar{Name:ovnnorthd-scripts,Value:n664hd8h66ch58dh64hc9h66bhd4h558h697h67bh557hdch664h567h669h555h696h556h556h5fh5bh569hbh665h9dh4h9bh564hc8h5b7h5c4q,ValueFrom:nil,},EnvVar{Name:tls-ca-bundle.pem,Value:nd7hbchbh7h66chc4h88h86h5dfhfch5d9h56ch556h5b6hfdh584h7bhc4h64h5f9h59bh5d4h59h5f5h86h648hdch76hcdh5f8hcdh655q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-northd-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-northd-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-northd-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4zfk2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/status_check.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:1,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/status_check.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:1,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]Container
ResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-northd-0_openstack(2d73bc22-a412-491f-9484-71864027c02f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:38:21 crc kubenswrapper[4929]: I1122 07:38:21.951849 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=< Nov 22 07:38:21 crc kubenswrapper[4929]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 22 07:38:21 crc kubenswrapper[4929]: > Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.263580 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-gglj6" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.483582 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-6p98g-config-b2wj8"] Nov 22 07:38:22 crc kubenswrapper[4929]: E1122 07:38:22.483931 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4a5840d-58f8-4312-854c-b927be25fc3f" containerName="init" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.483949 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4a5840d-58f8-4312-854c-b927be25fc3f" containerName="init" Nov 22 07:38:22 crc kubenswrapper[4929]: E1122 07:38:22.483964 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4a5840d-58f8-4312-854c-b927be25fc3f" containerName="dnsmasq-dns" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.483971 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4a5840d-58f8-4312-854c-b927be25fc3f" containerName="dnsmasq-dns" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.484202 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4a5840d-58f8-4312-854c-b927be25fc3f" containerName="dnsmasq-dns" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.484859 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.488462 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.493486 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-6p98g-config-b2wj8"] Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.582640 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/42115253-ba3a-4bce-a4e4-54f2fddb63f4-additional-scripts\") pod \"ovn-controller-6p98g-config-b2wj8\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") " pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.582685 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wdm8\" (UniqueName: \"kubernetes.io/projected/42115253-ba3a-4bce-a4e4-54f2fddb63f4-kube-api-access-4wdm8\") pod \"ovn-controller-6p98g-config-b2wj8\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") " pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.582727 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/42115253-ba3a-4bce-a4e4-54f2fddb63f4-var-log-ovn\") pod \"ovn-controller-6p98g-config-b2wj8\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") " pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.582764 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42115253-ba3a-4bce-a4e4-54f2fddb63f4-scripts\") pod \"ovn-controller-6p98g-config-b2wj8\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") " pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.582804 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42115253-ba3a-4bce-a4e4-54f2fddb63f4-var-run\") pod \"ovn-controller-6p98g-config-b2wj8\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") " pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.582836 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/42115253-ba3a-4bce-a4e4-54f2fddb63f4-var-run-ovn\") pod \"ovn-controller-6p98g-config-b2wj8\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") " pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.684270 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/42115253-ba3a-4bce-a4e4-54f2fddb63f4-additional-scripts\") pod \"ovn-controller-6p98g-config-b2wj8\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") " pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.684569 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wdm8\" (UniqueName: 
\"kubernetes.io/projected/42115253-ba3a-4bce-a4e4-54f2fddb63f4-kube-api-access-4wdm8\") pod \"ovn-controller-6p98g-config-b2wj8\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") " pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.684726 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/42115253-ba3a-4bce-a4e4-54f2fddb63f4-var-log-ovn\") pod \"ovn-controller-6p98g-config-b2wj8\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") " pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.684877 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42115253-ba3a-4bce-a4e4-54f2fddb63f4-scripts\") pod \"ovn-controller-6p98g-config-b2wj8\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") " pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.685016 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42115253-ba3a-4bce-a4e4-54f2fddb63f4-var-run\") pod \"ovn-controller-6p98g-config-b2wj8\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") " pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.685113 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/42115253-ba3a-4bce-a4e4-54f2fddb63f4-var-log-ovn\") pod \"ovn-controller-6p98g-config-b2wj8\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") " pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.685118 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42115253-ba3a-4bce-a4e4-54f2fddb63f4-var-run\") pod \"ovn-controller-6p98g-config-b2wj8\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") " pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.685064 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/42115253-ba3a-4bce-a4e4-54f2fddb63f4-additional-scripts\") pod \"ovn-controller-6p98g-config-b2wj8\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") " pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.685142 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/42115253-ba3a-4bce-a4e4-54f2fddb63f4-var-run-ovn\") pod \"ovn-controller-6p98g-config-b2wj8\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") " pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.685549 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/42115253-ba3a-4bce-a4e4-54f2fddb63f4-var-run-ovn\") pod \"ovn-controller-6p98g-config-b2wj8\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") " pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.686763 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/42115253-ba3a-4bce-a4e4-54f2fddb63f4-scripts\") pod \"ovn-controller-6p98g-config-b2wj8\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") " pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.715746 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wdm8\" (UniqueName: \"kubernetes.io/projected/42115253-ba3a-4bce-a4e4-54f2fddb63f4-kube-api-access-4wdm8\") pod \"ovn-controller-6p98g-config-b2wj8\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") " pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:22 crc kubenswrapper[4929]: I1122 07:38:22.807348 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-6p98g-config-b2wj8" Nov 22 07:38:26 crc kubenswrapper[4929]: I1122 07:38:26.936310 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=< Nov 22 07:38:26 crc kubenswrapper[4929]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 22 07:38:26 crc kubenswrapper[4929]: > Nov 22 07:38:27 crc kubenswrapper[4929]: E1122 07:38:27.415295 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[etc-swift], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-storage-0" podUID="dab37299-3b8e-46d0-b6a5-044f7d4878d6" Nov 22 07:38:27 crc kubenswrapper[4929]: I1122 07:38:27.590114 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 22 07:38:28 crc kubenswrapper[4929]: I1122 07:38:28.185617 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="76073571-729d-4de5-bda6-780d28ae6a9b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused" Nov 22 07:38:28 crc kubenswrapper[4929]: I1122 07:38:28.460886 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="941ef061-1085-45ca-84e2-60447bb10c47" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused" Nov 22 07:38:31 crc kubenswrapper[4929]: I1122 07:38:31.960581 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=< Nov 22 07:38:31 crc kubenswrapper[4929]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 22 07:38:31 crc kubenswrapper[4929]: > Nov 22 07:38:32 crc kubenswrapper[4929]: I1122 07:38:32.360102 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:38:32 crc kubenswrapper[4929]: E1122 07:38:32.360380 4929 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 22 07:38:32 crc kubenswrapper[4929]: E1122 07:38:32.360404 4929 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 22 
07:38:32 crc kubenswrapper[4929]: E1122 07:38:32.360479 4929 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift podName:dab37299-3b8e-46d0-b6a5-044f7d4878d6 nodeName:}" failed. No retries permitted until 2025-11-22 07:40:34.360456428 +0000 UTC m=+1771.469910471 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift") pod "swift-storage-0" (UID: "dab37299-3b8e-46d0-b6a5-044f7d4878d6") : configmap "swift-ring-files" not found Nov 22 07:38:36 crc kubenswrapper[4929]: I1122 07:38:36.934623 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=< Nov 22 07:38:36 crc kubenswrapper[4929]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 22 07:38:36 crc kubenswrapper[4929]: > Nov 22 07:38:36 crc kubenswrapper[4929]: I1122 07:38:36.948801 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="941ef061-1085-45ca-84e2-60447bb10c47" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused" Nov 22 07:38:38 crc kubenswrapper[4929]: I1122 07:38:38.186573 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="76073571-729d-4de5-bda6-780d28ae6a9b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused" Nov 22 07:38:38 crc kubenswrapper[4929]: I1122 07:38:38.460294 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="941ef061-1085-45ca-84e2-60447bb10c47" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused" Nov 22 07:38:39 crc kubenswrapper[4929]: I1122 07:38:39.979464 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=< Nov 22 07:38:39 crc kubenswrapper[4929]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 22 07:38:39 crc kubenswrapper[4929]: > Nov 22 07:38:41 crc kubenswrapper[4929]: I1122 07:38:41.956067 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=< Nov 22 07:38:41 crc kubenswrapper[4929]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 22 07:38:41 crc kubenswrapper[4929]: > Nov 22 07:38:44 crc kubenswrapper[4929]: E1122 07:38:44.723135 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:d972f4faa5e9c121402d23ed85002f26af48ec36b1b71a7489d677b3913d08b4" Nov 22 07:38:44 crc kubenswrapper[4929]: E1122 07:38:44.723963 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:thanos-sidecar,Image:registry.redhat.io/cluster-observability-operator/thanos-rhel9@sha256:d972f4faa5e9c121402d23ed85002f26af48ec36b1b71a7489d677b3913d08b4,Command:[],Args:[sidecar --prometheus.url=http://localhost:9090/ --grpc-address=:10901 --http-address=:10902 --log.level=info --prometheus.http-client-file=/etc/thanos/config/prometheus.http-client-file.yaml],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http,HostPort:0,ContainerPort:10902,Protocol:TCP,HostIP:,},ContainerPort{Name:grpc,HostPort:0,ContainerPort:10901,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:thanos-prometheus-http-client-file,ReadOnly:false,MountPath:/etc/thanos/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f8w4x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod prometheus-metric-storage-0_openstack(fad488a3-81d2-42c9-9140-9ca981927e13): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 07:38:44 crc kubenswrapper[4929]: E1122 07:38:44.725255 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"prometheus\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\", failed to \"StartContainer\" for \"thanos-sidecar\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"]" pod="openstack/prometheus-metric-storage-0" podUID="fad488a3-81d2-42c9-9140-9ca981927e13" Nov 22 07:38:46 crc kubenswrapper[4929]: I1122 07:38:46.945195 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=< Nov 22 07:38:46 crc kubenswrapper[4929]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 22 07:38:46 crc kubenswrapper[4929]: > Nov 22 07:38:48 crc kubenswrapper[4929]: I1122 07:38:48.185323 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="76073571-729d-4de5-bda6-780d28ae6a9b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused" Nov 22 07:38:48 crc kubenswrapper[4929]: I1122 07:38:48.460842 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="941ef061-1085-45ca-84e2-60447bb10c47" containerName="rabbitmq" probeResult="failure" output="dial tcp 
10.217.0.106:5671: connect: connection refused"
Nov 22 07:38:48 crc kubenswrapper[4929]: I1122 07:38:48.594772    4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 07:38:48 crc kubenswrapper[4929]: I1122 07:38:48.594853    4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 07:38:51 crc kubenswrapper[4929]: I1122 07:38:51.944477    4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=<
Nov 22 07:38:51 crc kubenswrapper[4929]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 22 07:38:51 crc kubenswrapper[4929]: >
Nov 22 07:38:56 crc kubenswrapper[4929]: I1122 07:38:56.954098    4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=<
Nov 22 07:38:56 crc kubenswrapper[4929]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 22 07:38:56 crc kubenswrapper[4929]: >
Nov 22 07:38:58 crc kubenswrapper[4929]: I1122 07:38:58.184996    4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="76073571-729d-4de5-bda6-780d28ae6a9b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused"
Nov 22 07:38:58 crc kubenswrapper[4929]: I1122 07:38:58.460303    4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="941ef061-1085-45ca-84e2-60447bb10c47" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused"
Nov 22 07:39:01 crc kubenswrapper[4929]: I1122 07:39:01.966637    4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=<
Nov 22 07:39:01 crc kubenswrapper[4929]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 22 07:39:01 crc kubenswrapper[4929]: >
Nov 22 07:39:06 crc kubenswrapper[4929]: I1122 07:39:06.966100    4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=<
Nov 22 07:39:06 crc kubenswrapper[4929]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 22 07:39:06 crc kubenswrapper[4929]: >
Nov 22 07:39:08 crc kubenswrapper[4929]: I1122 07:39:08.185613    4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="76073571-729d-4de5-bda6-780d28ae6a9b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused"
Nov 22 07:39:08 crc kubenswrapper[4929]: I1122 07:39:08.460443    4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="941ef061-1085-45ca-84e2-60447bb10c47" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused"
Nov 22 07:39:11 crc kubenswrapper[4929]: I1122 07:39:11.954158    4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-6p98g" podUID="a17264d7-e93b-4448-9ed8-0be507a9120f" containerName="ovn-controller" probeResult="failure" output=<
Nov 22 07:39:11 crc kubenswrapper[4929]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 22 07:39:11 crc kubenswrapper[4929]: >
Nov 22 07:39:13 crc kubenswrapper[4929]: E1122 07:39:13.883516    4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Nov 22 07:39:13 crc kubenswrapper[4929]: E1122 07:39:13.883829    4929 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fd44s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-bhx9f_openshift-marketplace(1515626b-94a1-4527-8129-14fe7afaf988): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 22 07:39:13 crc kubenswrapper[4929]: E1122 07:39:13.885651    4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-bhx9f" podUID="1515626b-94a1-4527-8129-14fe7afaf988"
Nov 22 07:39:14 crc kubenswrapper[4929]: I1122 07:39:14.042244    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n8qwb" event={"ID":"f8b580fe-2477-455b-bdaa-b59a1d93c986","Type":"ContainerStarted","Data":"4af8d42be7ade4e495d8c3739790eff7d665ab2772650c09eb428bebdd475171"}
Nov 22 07:39:14 crc kubenswrapper[4929]: E1122 07:39:14.544294    4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-bhx9f" podUID="1515626b-94a1-4527-8129-14fe7afaf988"
Nov 22 07:39:15 crc kubenswrapper[4929]: I1122 07:39:15.547042    4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-6p98g-config-b2wj8"]
Nov 22 07:39:15 crc kubenswrapper[4929]: W1122 07:39:15.554350    4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod42115253_ba3a_4bce_a4e4_54f2fddb63f4.slice/crio-0dfc36e4978eff9a1557998842ac8527dab88f2b87af656011bdc8f9c36f9d06 WatchSource:0}: Error finding container 0dfc36e4978eff9a1557998842ac8527dab88f2b87af656011bdc8f9c36f9d06: Status 404 returned error can't find the container with id 0dfc36e4978eff9a1557998842ac8527dab88f2b87af656011bdc8f9c36f9d06
Nov 22 07:39:16 crc kubenswrapper[4929]: I1122 07:39:16.062280    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6p98g-config-b2wj8" event={"ID":"42115253-ba3a-4bce-a4e4-54f2fddb63f4","Type":"ContainerStarted","Data":"0dfc36e4978eff9a1557998842ac8527dab88f2b87af656011bdc8f9c36f9d06"}
Nov 22 07:39:16 crc kubenswrapper[4929]: I1122 07:39:16.064475    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n8qwb" event={"ID":"f8b580fe-2477-455b-bdaa-b59a1d93c986","Type":"ContainerStarted","Data":"f43c0cfa78d03908bdc7c2b7ab8476cc34ae24563f52b8f8efe0f13705c2edc7"}
Nov 22 07:39:16 crc kubenswrapper[4929]: I1122 07:39:16.067359    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zv95t" event={"ID":"2584eaad-5c38-40d2-b1da-7a6268080fd0","Type":"ContainerStarted","Data":"8433b6e1feb3faea1eee8a9fc8a5b45358f3fb339c0bbfa06155fbe031127099"}
Nov 22 07:39:16 crc kubenswrapper[4929]: I1122 07:39:16.071873    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cv5dp" event={"ID":"91c4ac46-cc90-4399-9ef0-45c4b010ff70","Type":"ContainerStarted","Data":"3e7d9920c9b3debca85ab0512edf6f13b85ecb1ea466aba46ecd872ca2118d71"}
Nov 22 07:39:16 crc kubenswrapper[4929]: I1122 07:39:16.073457    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"2d73bc22-a412-491f-9484-71864027c02f","Type":"ContainerStarted","Data":"82eb8832bab0dffa6115099c68f22dcd62c329c990bb38a5e4dc6762573c30ea"}
Nov 22 07:39:16 crc kubenswrapper[4929]: E1122 07:39:16.370727    4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-northd\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-northd-0" podUID="2d73bc22-a412-491f-9484-71864027c02f"
Nov 22 07:39:16 crc kubenswrapper[4929]: I1122 07:39:16.952421    4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-6p98g"
Nov 22 07:39:17 crc kubenswrapper[4929]: I1122 07:39:17.084343    4929 generic.go:334] "Generic (PLEG): container finished" podID="f8b580fe-2477-455b-bdaa-b59a1d93c986" containerID="f43c0cfa78d03908bdc7c2b7ab8476cc34ae24563f52b8f8efe0f13705c2edc7" exitCode=0
Nov 22 07:39:17 crc kubenswrapper[4929]: I1122 07:39:17.084444    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n8qwb" event={"ID":"f8b580fe-2477-455b-bdaa-b59a1d93c986","Type":"ContainerDied","Data":"f43c0cfa78d03908bdc7c2b7ab8476cc34ae24563f52b8f8efe0f13705c2edc7"}
Nov 22 07:39:17 crc kubenswrapper[4929]: I1122 07:39:17.087082    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6p98g-config-b2wj8" event={"ID":"42115253-ba3a-4bce-a4e4-54f2fddb63f4","Type":"ContainerStarted","Data":"6c200077ee5cc194de584f0657e735edb69a37eb71d9ea8c3c3c5e4407197f2a"}
Nov 22 07:39:17 crc kubenswrapper[4929]: I1122 07:39:17.143538    4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zv95t" podStartSLOduration=50.193989682 podStartE2EDuration="3m55.143517929s" podCreationTimestamp="2025-11-22 07:35:22 +0000 UTC" firstStartedPulling="2025-11-22 07:36:10.123225448 +0000 UTC m=+1507.232679461" lastFinishedPulling="2025-11-22 07:39:15.072753695 +0000 UTC m=+1692.182207708" observedRunningTime="2025-11-22 07:39:17.124703751 +0000 UTC m=+1694.234157764" watchObservedRunningTime="2025-11-22 07:39:17.143517929 +0000 UTC m=+1694.252971942"
Nov 22 07:39:18 crc kubenswrapper[4929]: I1122 07:39:18.098932    4929 generic.go:334] "Generic (PLEG): container finished" podID="42115253-ba3a-4bce-a4e4-54f2fddb63f4" containerID="6c200077ee5cc194de584f0657e735edb69a37eb71d9ea8c3c3c5e4407197f2a" exitCode=0
Nov 22 07:39:18 crc kubenswrapper[4929]: I1122 07:39:18.099085    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6p98g-config-b2wj8" event={"ID":"42115253-ba3a-4bce-a4e4-54f2fddb63f4","Type":"ContainerDied","Data":"6c200077ee5cc194de584f0657e735edb69a37eb71d9ea8c3c3c5e4407197f2a"}
Nov 22 07:39:18 crc kubenswrapper[4929]: I1122 07:39:18.126600    4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-cv5dp" podStartSLOduration=6.191650731 podStartE2EDuration="3m11.126582613s" podCreationTimestamp="2025-11-22 07:36:07 +0000 UTC" firstStartedPulling="2025-11-22 07:36:10.137850853 +0000 UTC m=+1507.247304866" lastFinishedPulling="2025-11-22 07:39:15.072782685 +0000 UTC m=+1692.182236748" observedRunningTime="2025-11-22 07:39:18.125095079 +0000 UTC m=+1695.234549102" watchObservedRunningTime="2025-11-22 07:39:18.126582613 +0000 UTC m=+1695.236036636"
Nov 22 07:39:18 crc kubenswrapper[4929]: I1122 07:39:18.184943    4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="76073571-729d-4de5-bda6-780d28ae6a9b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused"
Nov 22 07:39:18 crc kubenswrapper[4929]: I1122 07:39:18.460189    4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="941ef061-1085-45ca-84e2-60447bb10c47" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused"
Nov 22 07:39:18 crc kubenswrapper[4929]: I1122 07:39:18.595184    4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 07:39:18 crc kubenswrapper[4929]: I1122 07:39:18.595277    4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 07:39:18 crc kubenswrapper[4929]: I1122 07:39:18.595324    4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dssfx"
Nov 22 07:39:18 crc kubenswrapper[4929]: I1122 07:39:18.596120    4929 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1"} pod="openshift-machine-config-operator/machine-config-daemon-dssfx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 22 07:39:18 crc kubenswrapper[4929]: I1122 07:39:18.596192    4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" containerID="cri-o://bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" gracePeriod=600
Nov 22 07:39:20 crc kubenswrapper[4929]: I1122 07:39:20.122568    4929 generic.go:334] "Generic (PLEG): container finished" podID="470531cb-120c-48d9-80e1-adf074cf3055" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" exitCode=0
Nov 22 07:39:20 crc kubenswrapper[4929]: I1122 07:39:20.123422    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerDied","Data":"bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1"}
Nov 22 07:39:20 crc kubenswrapper[4929]: I1122 07:39:20.123475    4929 scope.go:117] "RemoveContainer" containerID="5a8cd91f718685551c87c3db0c1786abf5a0657ef65dfd00afe7c250421c651c"
Nov 22 07:39:23 crc kubenswrapper[4929]: I1122 07:39:23.262228    4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zv95t"
Nov 22 07:39:23 crc kubenswrapper[4929]: I1122 07:39:23.262561    4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zv95t"
Nov 22 07:39:24 crc kubenswrapper[4929]: I1122 07:39:24.320969    4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zv95t" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" probeResult="failure" output=<
Nov 22 07:39:24 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s
Nov 22 07:39:24 crc kubenswrapper[4929]: >
Nov 22 07:39:26 crc kubenswrapper[4929]: E1122 07:39:26.035438    4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 07:39:26 crc kubenswrapper[4929]: I1122 07:39:26.177755    4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1"
Nov 22 07:39:26 crc kubenswrapper[4929]: E1122 07:39:26.178126    4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 07:39:27 crc kubenswrapper[4929]: I1122 07:39:27.650196    4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-cv5dp"
Nov 22 07:39:27 crc kubenswrapper[4929]: I1122 07:39:27.651686    4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-cv5dp"
Nov 22 07:39:28 crc kubenswrapper[4929]: I1122 07:39:28.185082    4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="76073571-729d-4de5-bda6-780d28ae6a9b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused"
Nov 22 07:39:28 crc kubenswrapper[4929]: I1122 07:39:28.460290    4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="941ef061-1085-45ca-84e2-60447bb10c47" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused"
Nov 22 07:39:28 crc kubenswrapper[4929]: I1122 07:39:28.734827    4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-cv5dp" podUID="91c4ac46-cc90-4399-9ef0-45c4b010ff70" containerName="registry-server" probeResult="failure" output=<
Nov 22 07:39:28 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s
Nov 22 07:39:28 crc kubenswrapper[4929]: >
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.322348    4929 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.415023    4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-6p98g-config-b2wj8"
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.469453    4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/42115253-ba3a-4bce-a4e4-54f2fddb63f4-var-log-ovn\") pod \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") "
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.469558    4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/42115253-ba3a-4bce-a4e4-54f2fddb63f4-var-run-ovn\") pod \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") "
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.469623    4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/42115253-ba3a-4bce-a4e4-54f2fddb63f4-additional-scripts\") pod \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") "
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.469668    4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42115253-ba3a-4bce-a4e4-54f2fddb63f4-var-run\") pod \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") "
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.469714    4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42115253-ba3a-4bce-a4e4-54f2fddb63f4-scripts\") pod \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") "
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.469735    4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4wdm8\" (UniqueName: \"kubernetes.io/projected/42115253-ba3a-4bce-a4e4-54f2fddb63f4-kube-api-access-4wdm8\") pod \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\" (UID: \"42115253-ba3a-4bce-a4e4-54f2fddb63f4\") "
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.472087    4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42115253-ba3a-4bce-a4e4-54f2fddb63f4-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "42115253-ba3a-4bce-a4e4-54f2fddb63f4" (UID: "42115253-ba3a-4bce-a4e4-54f2fddb63f4"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.472153    4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/42115253-ba3a-4bce-a4e4-54f2fddb63f4-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "42115253-ba3a-4bce-a4e4-54f2fddb63f4" (UID: "42115253-ba3a-4bce-a4e4-54f2fddb63f4"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.472180    4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/42115253-ba3a-4bce-a4e4-54f2fddb63f4-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "42115253-ba3a-4bce-a4e4-54f2fddb63f4" (UID: "42115253-ba3a-4bce-a4e4-54f2fddb63f4"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.472205    4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/42115253-ba3a-4bce-a4e4-54f2fddb63f4-var-run" (OuterVolumeSpecName: "var-run") pod "42115253-ba3a-4bce-a4e4-54f2fddb63f4" (UID: "42115253-ba3a-4bce-a4e4-54f2fddb63f4"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.473021    4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42115253-ba3a-4bce-a4e4-54f2fddb63f4-scripts" (OuterVolumeSpecName: "scripts") pod "42115253-ba3a-4bce-a4e4-54f2fddb63f4" (UID: "42115253-ba3a-4bce-a4e4-54f2fddb63f4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.485542    4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42115253-ba3a-4bce-a4e4-54f2fddb63f4-kube-api-access-4wdm8" (OuterVolumeSpecName: "kube-api-access-4wdm8") pod "42115253-ba3a-4bce-a4e4-54f2fddb63f4" (UID: "42115253-ba3a-4bce-a4e4-54f2fddb63f4"). InnerVolumeSpecName "kube-api-access-4wdm8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.572202    4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42115253-ba3a-4bce-a4e4-54f2fddb63f4-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.572265    4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4wdm8\" (UniqueName: \"kubernetes.io/projected/42115253-ba3a-4bce-a4e4-54f2fddb63f4-kube-api-access-4wdm8\") on node \"crc\" DevicePath \"\""
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.572279    4929 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/42115253-ba3a-4bce-a4e4-54f2fddb63f4-var-log-ovn\") on node \"crc\" DevicePath \"\""
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.572292    4929 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/42115253-ba3a-4bce-a4e4-54f2fddb63f4-var-run-ovn\") on node \"crc\" DevicePath \"\""
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.572306    4929 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/42115253-ba3a-4bce-a4e4-54f2fddb63f4-additional-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 07:39:29 crc kubenswrapper[4929]: I1122 07:39:29.572317    4929 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42115253-ba3a-4bce-a4e4-54f2fddb63f4-var-run\") on node \"crc\" DevicePath \"\""
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.214711    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6p98g-config-b2wj8" event={"ID":"42115253-ba3a-4bce-a4e4-54f2fddb63f4","Type":"ContainerDied","Data":"0dfc36e4978eff9a1557998842ac8527dab88f2b87af656011bdc8f9c36f9d06"}
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.215054    4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0dfc36e4978eff9a1557998842ac8527dab88f2b87af656011bdc8f9c36f9d06"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.214925    4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-6p98g-config-b2wj8"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.526999    4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-6p98g-config-b2wj8"]
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.538198    4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-6p98g-config-b2wj8"]
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.629128    4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-6p98g-config-t5mht"]
Nov 22 07:39:30 crc kubenswrapper[4929]: E1122 07:39:30.629843    4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42115253-ba3a-4bce-a4e4-54f2fddb63f4" containerName="ovn-config"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.629947    4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="42115253-ba3a-4bce-a4e4-54f2fddb63f4" containerName="ovn-config"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.630275    4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="42115253-ba3a-4bce-a4e4-54f2fddb63f4" containerName="ovn-config"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.631112    4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.633773    4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.640170    4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-6p98g-config-t5mht"]
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.691615    4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/612a3816-5128-496e-b640-63a90fa7dea2-additional-scripts\") pod \"ovn-controller-6p98g-config-t5mht\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") " pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.691689    4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/612a3816-5128-496e-b640-63a90fa7dea2-scripts\") pod \"ovn-controller-6p98g-config-t5mht\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") " pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.691734    4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/612a3816-5128-496e-b640-63a90fa7dea2-var-log-ovn\") pod \"ovn-controller-6p98g-config-t5mht\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") " pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.691755    4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/612a3816-5128-496e-b640-63a90fa7dea2-var-run\") pod \"ovn-controller-6p98g-config-t5mht\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") " pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.692135    4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/612a3816-5128-496e-b640-63a90fa7dea2-var-run-ovn\") pod \"ovn-controller-6p98g-config-t5mht\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") " pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.692285    4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pr5jg\" (UniqueName: \"kubernetes.io/projected/612a3816-5128-496e-b640-63a90fa7dea2-kube-api-access-pr5jg\") pod \"ovn-controller-6p98g-config-t5mht\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") " pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.794323    4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/612a3816-5128-496e-b640-63a90fa7dea2-var-run-ovn\") pod \"ovn-controller-6p98g-config-t5mht\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") " pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.794384    4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pr5jg\" (UniqueName: \"kubernetes.io/projected/612a3816-5128-496e-b640-63a90fa7dea2-kube-api-access-pr5jg\") pod \"ovn-controller-6p98g-config-t5mht\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") " pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.794441    4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/612a3816-5128-496e-b640-63a90fa7dea2-additional-scripts\") pod \"ovn-controller-6p98g-config-t5mht\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") " pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.794483    4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/612a3816-5128-496e-b640-63a90fa7dea2-scripts\") pod \"ovn-controller-6p98g-config-t5mht\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") " pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.794528    4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/612a3816-5128-496e-b640-63a90fa7dea2-var-log-ovn\") pod \"ovn-controller-6p98g-config-t5mht\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") " pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.794555    4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/612a3816-5128-496e-b640-63a90fa7dea2-var-run\") pod \"ovn-controller-6p98g-config-t5mht\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") " pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.794706    4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/612a3816-5128-496e-b640-63a90fa7dea2-var-run-ovn\") pod \"ovn-controller-6p98g-config-t5mht\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") " pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.794719    4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/612a3816-5128-496e-b640-63a90fa7dea2-var-run\") pod \"ovn-controller-6p98g-config-t5mht\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") " pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.794988    4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/612a3816-5128-496e-b640-63a90fa7dea2-var-log-ovn\") pod \"ovn-controller-6p98g-config-t5mht\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") " pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.795544    4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/612a3816-5128-496e-b640-63a90fa7dea2-additional-scripts\") pod \"ovn-controller-6p98g-config-t5mht\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") " pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.797081    4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/612a3816-5128-496e-b640-63a90fa7dea2-scripts\") pod \"ovn-controller-6p98g-config-t5mht\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") " pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.815115    4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pr5jg\" (UniqueName: \"kubernetes.io/projected/612a3816-5128-496e-b640-63a90fa7dea2-kube-api-access-pr5jg\") pod \"ovn-controller-6p98g-config-t5mht\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") " pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:30 crc kubenswrapper[4929]: I1122 07:39:30.949764    4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:31 crc kubenswrapper[4929]: I1122 07:39:31.958892    4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42115253-ba3a-4bce-a4e4-54f2fddb63f4" path="/var/lib/kubelet/pods/42115253-ba3a-4bce-a4e4-54f2fddb63f4/volumes"
Nov 22 07:39:34 crc kubenswrapper[4929]: I1122 07:39:34.278556    4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-6p98g-config-t5mht"]
Nov 22 07:39:34 crc kubenswrapper[4929]: I1122 07:39:34.313990    4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zv95t" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" probeResult="failure" output=<
Nov 22 07:39:34 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s
Nov 22 07:39:34 crc kubenswrapper[4929]: >
Nov 22 07:39:35 crc kubenswrapper[4929]: I1122 07:39:35.264122    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bhx9f" event={"ID":"1515626b-94a1-4527-8129-14fe7afaf988","Type":"ContainerStarted","Data":"519b3e1517a3997a54b74a40b655eda7e88d7dd9808e98e73405082c94f7561a"}
Nov 22 07:39:35 crc kubenswrapper[4929]: I1122 07:39:35.269130    4929 generic.go:334] "Generic (PLEG): container finished" podID="d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2" containerID="e8a4d0b6cf2507f1a320beb816beab7d14540a8c44dccd2d23f2e1f1e5c69680" exitCode=0
Nov 22 07:39:35 crc kubenswrapper[4929]: I1122 07:39:35.269234    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2","Type":"ContainerDied","Data":"e8a4d0b6cf2507f1a320beb816beab7d14540a8c44dccd2d23f2e1f1e5c69680"}
Nov 22 07:39:35 crc kubenswrapper[4929]: I1122 07:39:35.271606    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"2d73bc22-a412-491f-9484-71864027c02f","Type":"ContainerStarted","Data":"c2cc510f8674a636c5866bb42d88dc9e1e86665b57df802b6ce595cf3aaac328"}
Nov 22 07:39:35 crc kubenswrapper[4929]: I1122 07:39:35.272874    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6p98g-config-t5mht" event={"ID":"612a3816-5128-496e-b640-63a90fa7dea2","Type":"ContainerStarted","Data":"ee53c046288a0eb94a8c2b619166355da40b3e29fc25e2dd9a7fec5adbe32c18"}
Nov 22 07:39:35 crc kubenswrapper[4929]: I1122 07:39:35.278692    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fad488a3-81d2-42c9-9140-9ca981927e13","Type":"ContainerStarted","Data":"e9b17618afb0793aa01abffd070b9e212c61420388a7c9a9773f5655d88a368a"}
Nov 22 07:39:35 crc kubenswrapper[4929]: I1122 07:39:35.280681    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n8qwb" event={"ID":"f8b580fe-2477-455b-bdaa-b59a1d93c986","Type":"ContainerStarted","Data":"e14ff14610b57c661819d4fcf25a1be9e62291e8d798b4b704db251e3f4c4886"}
Nov 22 07:39:35 crc kubenswrapper[4929]: I1122 07:39:35.281977    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-bbxj7" event={"ID":"4c640fe8-4583-4162-949d-4508edaca274","Type":"ContainerStarted","Data":"c198ae480154d845a0c2d018b9e974928baa33f90a748f7779a3043f05c0e3bf"}
Nov 22 07:39:35 crc kubenswrapper[4929]: I1122 07:39:35.325731    4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-bbxj7" podStartSLOduration=3.393788106 podStartE2EDuration="3m11.325713403s" podCreationTimestamp="2025-11-22 07:36:24 +0000 UTC" firstStartedPulling="2025-11-22 07:36:25.629618321 +0000 UTC m=+1522.739072344" lastFinishedPulling="2025-11-22 07:39:33.561543628 +0000 UTC m=+1710.670997641" observedRunningTime="2025-11-22 07:39:35.321884806 +0000 UTC m=+1712.431338829" watchObservedRunningTime="2025-11-22 07:39:35.325713403 +0000 UTC m=+1712.435167416"
Nov 22 07:39:36 crc kubenswrapper[4929]: I1122 07:39:36.293291    4929 generic.go:334] "Generic (PLEG): container finished" podID="6a23e882-732c-416e-bfc5-c91517389f64" containerID="2dec4097dc30d0da48950393a3aa3f99672eb9c0ec0b0a68d50a3ec2c56826a9" exitCode=0
Nov 22 07:39:36 crc kubenswrapper[4929]: I1122 07:39:36.293432    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"6a23e882-732c-416e-bfc5-c91517389f64","Type":"ContainerDied","Data":"2dec4097dc30d0da48950393a3aa3f99672eb9c0ec0b0a68d50a3ec2c56826a9"}
Nov 22 07:39:36 crc kubenswrapper[4929]: I1122 07:39:36.296292    4929 generic.go:334] "Generic (PLEG): container finished" podID="f8b580fe-2477-455b-bdaa-b59a1d93c986" containerID="e14ff14610b57c661819d4fcf25a1be9e62291e8d798b4b704db251e3f4c4886" exitCode=0
Nov 22 07:39:36 crc kubenswrapper[4929]: I1122 07:39:36.296390    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n8qwb" event={"ID":"f8b580fe-2477-455b-bdaa-b59a1d93c986","Type":"ContainerDied","Data":"e14ff14610b57c661819d4fcf25a1be9e62291e8d798b4b704db251e3f4c4886"}
Nov 22 07:39:36 crc kubenswrapper[4929]: I1122 07:39:36.299013    4929 generic.go:334] "Generic (PLEG): container finished" podID="1515626b-94a1-4527-8129-14fe7afaf988" containerID="519b3e1517a3997a54b74a40b655eda7e88d7dd9808e98e73405082c94f7561a" exitCode=0
Nov 22 07:39:36 crc kubenswrapper[4929]: I1122 07:39:36.299093    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bhx9f" event={"ID":"1515626b-94a1-4527-8129-14fe7afaf988","Type":"ContainerDied","Data":"519b3e1517a3997a54b74a40b655eda7e88d7dd9808e98e73405082c94f7561a"}
Nov 22 07:39:36 crc kubenswrapper[4929]: I1122 07:39:36.302323    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2","Type":"ContainerStarted","Data":"9888e2c0cb305e703ba0ed033334aa299453a3cf6af652288674d97c8e786d42"}
Nov 22 07:39:36 crc kubenswrapper[4929]: I1122 07:39:36.305277    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6p98g-config-t5mht" event={"ID":"612a3816-5128-496e-b640-63a90fa7dea2","Type":"ContainerStarted","Data":"b3c0e34df4fdf9ce1db275bb8125355c4e79ea53d664d3e990696e39c4cb4aa0"}
Nov 22 07:39:36 crc kubenswrapper[4929]: I1122 07:39:36.305407    4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0"
Nov 22 07:39:36 crc kubenswrapper[4929]: I1122 07:39:36.340543    4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=47.821001323 podStartE2EDuration="3m7.340522889s" podCreationTimestamp="2025-11-22 07:36:29 +0000 UTC" firstStartedPulling="2025-11-22 07:37:14.446533991 +0000 UTC m=+1571.555988004" lastFinishedPulling="2025-11-22 07:39:33.966055557 +0000 UTC m=+1711.075509570" observedRunningTime="2025-11-22 07:39:36.334626305 +0000 UTC m=+1713.444080318" watchObservedRunningTime="2025-11-22 07:39:36.340522889 +0000 UTC m=+1713.449976902"
Nov 22 07:39:36 crc kubenswrapper[4929]: I1122 07:39:36.377064    4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-6p98g-config-t5mht" podStartSLOduration=6.37704495 podStartE2EDuration="6.37704495s" podCreationTimestamp="2025-11-22 07:39:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:39:36.371898993 +0000 UTC m=+1713.481353006" watchObservedRunningTime="2025-11-22 07:39:36.37704495 +0000 UTC m=+1713.486498963"
Nov 22 07:39:36 crc kubenswrapper[4929]: I1122 07:39:36.947472    4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1"
Nov 22 07:39:36 crc kubenswrapper[4929]: E1122 07:39:36.948060    4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 07:39:37 crc kubenswrapper[4929]: I1122 07:39:37.325174    4929 generic.go:334] "Generic (PLEG): container finished" podID="612a3816-5128-496e-b640-63a90fa7dea2" containerID="b3c0e34df4fdf9ce1db275bb8125355c4e79ea53d664d3e990696e39c4cb4aa0" exitCode=0
Nov 22 07:39:37 crc kubenswrapper[4929]: I1122 07:39:37.325412    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6p98g-config-t5mht" event={"ID":"612a3816-5128-496e-b640-63a90fa7dea2","Type":"ContainerDied","Data":"b3c0e34df4fdf9ce1db275bb8125355c4e79ea53d664d3e990696e39c4cb4aa0"}
Nov 22 07:39:37 crc kubenswrapper[4929]: I1122 07:39:37.329563    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"6a23e882-732c-416e-bfc5-c91517389f64","Type":"ContainerStarted","Data":"b81ab808b52aef992094383ff856c6b224e35159503f98596c589f825f762cde"}
Nov 22 07:39:37 crc kubenswrapper[4929]: I1122 07:39:37.366483    4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=-9223371668.488314 podStartE2EDuration="6m8.366461568s" podCreationTimestamp="2025-11-22 07:33:29 +0000 UTC" firstStartedPulling="2025-11-22 07:33:54.945032479 +0000 UTC m=+1372.054486492" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:39:37.359161052 +0000 UTC m=+1714.468615075" watchObservedRunningTime="2025-11-22 07:39:37.366461568 +0000 UTC m=+1714.475915581"
Nov 22 07:39:37 crc kubenswrapper[4929]: I1122 07:39:37.393658    4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=239.572278535 podStartE2EDuration="6m9.393637496s" podCreationTimestamp="2025-11-22 07:33:28 +0000 UTC" firstStartedPulling="2025-11-22 07:33:54.937378163 +0000 UTC m=+1372.046832196" lastFinishedPulling="2025-11-22 07:36:04.758737104 +0000 UTC m=+1501.868191157" observedRunningTime="2025-11-22 07:39:37.392830938 +0000 UTC m=+1714.502284981" watchObservedRunningTime="2025-11-22 07:39:37.393637496 +0000 UTC m=+1714.503091509"
Nov 22 07:39:37 crc kubenswrapper[4929]: I1122 07:39:37.957075    4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.185370    4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.699336    4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.766694    4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-cv5dp" podUID="91c4ac46-cc90-4399-9ef0-45c4b010ff70" containerName="registry-server" probeResult="failure" output=<
Nov 22 07:39:38 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s
Nov 22 07:39:38 crc kubenswrapper[4929]: >
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.772548    4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/612a3816-5128-496e-b640-63a90fa7dea2-additional-scripts\") pod \"612a3816-5128-496e-b640-63a90fa7dea2\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") "
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.772618    4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/612a3816-5128-496e-b640-63a90fa7dea2-var-log-ovn\") pod \"612a3816-5128-496e-b640-63a90fa7dea2\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") "
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.772653    4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/612a3816-5128-496e-b640-63a90fa7dea2-var-run-ovn\") pod \"612a3816-5128-496e-b640-63a90fa7dea2\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") "
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.772683    4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/612a3816-5128-496e-b640-63a90fa7dea2-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "612a3816-5128-496e-b640-63a90fa7dea2" (UID: "612a3816-5128-496e-b640-63a90fa7dea2"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.772697    4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/612a3816-5128-496e-b640-63a90fa7dea2-scripts\") pod \"612a3816-5128-496e-b640-63a90fa7dea2\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") "
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.772749    4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pr5jg\" (UniqueName: \"kubernetes.io/projected/612a3816-5128-496e-b640-63a90fa7dea2-kube-api-access-pr5jg\") pod \"612a3816-5128-496e-b640-63a90fa7dea2\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") "
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.772847    4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/612a3816-5128-496e-b640-63a90fa7dea2-var-run\") pod \"612a3816-5128-496e-b640-63a90fa7dea2\" (UID: \"612a3816-5128-496e-b640-63a90fa7dea2\") "
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.773404    4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/612a3816-5128-496e-b640-63a90fa7dea2-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "612a3816-5128-496e-b640-63a90fa7dea2" (UID: "612a3816-5128-496e-b640-63a90fa7dea2"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.773434    4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/612a3816-5128-496e-b640-63a90fa7dea2-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "612a3816-5128-496e-b640-63a90fa7dea2" (UID: "612a3816-5128-496e-b640-63a90fa7dea2"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.773522    4929 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/612a3816-5128-496e-b640-63a90fa7dea2-additional-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.773541    4929 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/612a3816-5128-496e-b640-63a90fa7dea2-var-log-ovn\") on node \"crc\" DevicePath \"\""
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.773553    4929 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/612a3816-5128-496e-b640-63a90fa7dea2-var-run-ovn\") on node \"crc\" DevicePath \"\""
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.773578    4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/612a3816-5128-496e-b640-63a90fa7dea2-var-run" (OuterVolumeSpecName: "var-run") pod "612a3816-5128-496e-b640-63a90fa7dea2" (UID: "612a3816-5128-496e-b640-63a90fa7dea2"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.779178    4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/612a3816-5128-496e-b640-63a90fa7dea2-kube-api-access-pr5jg" (OuterVolumeSpecName: "kube-api-access-pr5jg") pod "612a3816-5128-496e-b640-63a90fa7dea2" (UID: "612a3816-5128-496e-b640-63a90fa7dea2"). InnerVolumeSpecName "kube-api-access-pr5jg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.875026    4929 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/612a3816-5128-496e-b640-63a90fa7dea2-var-run\") on node \"crc\" DevicePath \"\""
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.875064    4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pr5jg\" (UniqueName: \"kubernetes.io/projected/612a3816-5128-496e-b640-63a90fa7dea2-kube-api-access-pr5jg\") on node \"crc\" DevicePath \"\""
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.918915    4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/612a3816-5128-496e-b640-63a90fa7dea2-scripts" (OuterVolumeSpecName: "scripts") pod "612a3816-5128-496e-b640-63a90fa7dea2" (UID: "612a3816-5128-496e-b640-63a90fa7dea2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:39:38 crc kubenswrapper[4929]: I1122 07:39:38.976660    4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/612a3816-5128-496e-b640-63a90fa7dea2-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 07:39:39 crc kubenswrapper[4929]: I1122 07:39:39.347949    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-6p98g-config-t5mht" event={"ID":"612a3816-5128-496e-b640-63a90fa7dea2","Type":"ContainerDied","Data":"ee53c046288a0eb94a8c2b619166355da40b3e29fc25e2dd9a7fec5adbe32c18"}
Nov 22 07:39:39 crc kubenswrapper[4929]: I1122 07:39:39.348004    4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ee53c046288a0eb94a8c2b619166355da40b3e29fc25e2dd9a7fec5adbe32c18"
Nov 22 07:39:39 crc kubenswrapper[4929]: I1122 07:39:39.348075    4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-6p98g-config-t5mht"
Nov 22 07:39:39 crc kubenswrapper[4929]: I1122 07:39:39.779657    4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-6p98g-config-t5mht"]
Nov 22 07:39:39 crc kubenswrapper[4929]: I1122 07:39:39.786590    4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-6p98g-config-t5mht"]
Nov 22 07:39:39 crc kubenswrapper[4929]: I1122 07:39:39.797477    4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Nov 22 07:39:39 crc kubenswrapper[4929]: I1122 07:39:39.797526    4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Nov 22 07:39:39 crc kubenswrapper[4929]: I1122 07:39:39.957392    4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="612a3816-5128-496e-b640-63a90fa7dea2" path="/var/lib/kubelet/pods/612a3816-5128-496e-b640-63a90fa7dea2/volumes"
Nov 22 07:39:41 crc kubenswrapper[4929]: I1122 07:39:41.281175    4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Nov 22 07:39:41 crc kubenswrapper[4929]: I1122 07:39:41.281280    4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Nov 22 07:39:44 crc kubenswrapper[4929]: I1122 07:39:44.304321    4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zv95t" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" probeResult="failure" output=<
Nov 22 07:39:44 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s
Nov 22 07:39:44 crc kubenswrapper[4929]: >
Nov 22 07:39:45 crc kubenswrapper[4929]: I1122 07:39:45.596340    4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0"
Nov 22 07:39:48 crc kubenswrapper[4929]: I1122 07:39:48.696415    4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-cv5dp" podUID="91c4ac46-cc90-4399-9ef0-45c4b010ff70" containerName="registry-server" probeResult="failure" output=<
Nov 22 07:39:48 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s
Nov 22 07:39:48 crc kubenswrapper[4929]: >
Nov 22 07:39:51 crc kubenswrapper[4929]: I1122 07:39:51.947934    4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1"
Nov 22 07:39:51 crc kubenswrapper[4929]: E1122 07:39:51.948776    4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 07:39:54 crc kubenswrapper[4929]: I1122 07:39:54.321310    4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zv95t" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" probeResult="failure" output=<
Nov 22 07:39:54 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s
Nov 22 07:39:54 crc kubenswrapper[4929]: >
Nov 22 07:39:55 crc kubenswrapper[4929]: I1122 07:39:55.497255    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n8qwb" event={"ID":"f8b580fe-2477-455b-bdaa-b59a1d93c986","Type":"ContainerStarted","Data":"f82888111ea6ac75671f38230813e100c7dc0d6f4f423ec147dfdc525209f9f0"}
Nov 22 07:39:55 crc kubenswrapper[4929]: I1122 07:39:55.500234    4929 generic.go:334] "Generic (PLEG): container finished" podID="4c640fe8-4583-4162-949d-4508edaca274" containerID="c198ae480154d845a0c2d018b9e974928baa33f90a748f7779a3043f05c0e3bf" exitCode=0
Nov 22 07:39:55 crc kubenswrapper[4929]: I1122 07:39:55.500292    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-bbxj7" event={"ID":"4c640fe8-4583-4162-949d-4508edaca274","Type":"ContainerDied","Data":"c198ae480154d845a0c2d018b9e974928baa33f90a748f7779a3043f05c0e3bf"}
Nov 22 07:39:56 crc kubenswrapper[4929]: I1122 07:39:56.514747    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fad488a3-81d2-42c9-9140-9ca981927e13","Type":"ContainerStarted","Data":"78076513f20aaa41e8620d83addfa9d897a09eccbd856937cebb6dde40c19b10"}
Nov 22 07:39:56 crc kubenswrapper[4929]: I1122 07:39:56.517023    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bhx9f" event={"ID":"1515626b-94a1-4527-8129-14fe7afaf988","Type":"ContainerStarted","Data":"496465d8d7f819f96c07c5f0043b781e9c8c1a0582b8ad0de3fe73da38767503"}
Nov 22 07:39:56 crc kubenswrapper[4929]: I1122 07:39:56.543068    4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=24.32510492 podStartE2EDuration="6m23.543045965s" podCreationTimestamp="2025-11-22 07:33:33 +0000 UTC" firstStartedPulling="2025-11-22 07:33:55.552834521 +0000 UTC m=+1372.662288534" lastFinishedPulling="2025-11-22 07:39:54.770775566 +0000 UTC m=+1731.880229579" observedRunningTime="2025-11-22 07:39:56.538050941 +0000 UTC m=+1733.647504954" watchObservedRunningTime="2025-11-22 07:39:56.543045965 +0000 UTC m=+1733.652499978"
Nov 22 07:39:56 crc kubenswrapper[4929]: I1122 07:39:56.561370    4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-n8qwb" podStartSLOduration=116.123150573 podStartE2EDuration="2m33.561345101s" podCreationTimestamp="2025-11-22 07:37:23 +0000 UTC" firstStartedPulling="2025-11-22 07:39:17.086598124 +0000 UTC m=+1694.196052177" lastFinishedPulling="2025-11-22 07:39:54.524792692 +0000 UTC m=+1731.634246705" observedRunningTime="2025-11-22 07:39:56.558400654 +0000 UTC m=+1733.667854667" watchObservedRunningTime="2025-11-22 07:39:56.561345101 +0000 UTC m=+1733.670799124"
Nov 22 07:39:56 crc kubenswrapper[4929]: I1122 07:39:56.583232    4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bhx9f" podStartSLOduration=26.343516094 podStartE2EDuration="2m45.583192148s" podCreationTimestamp="2025-11-22 07:37:11 +0000 UTC" firstStartedPulling="2025-11-22 07:37:35.530186281 +0000 UTC m=+1592.639640344" lastFinishedPulling="2025-11-22 07:39:54.769862385 +0000 UTC m=+1731.879316398" observedRunningTime="2025-11-22 07:39:56.576350782 +0000 UTC m=+1733.685804805" watchObservedRunningTime="2025-11-22 07:39:56.583192148 +0000 UTC m=+1733.692646151"
Nov 22 07:39:56 crc kubenswrapper[4929]: I1122 07:39:56.928116    4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-bbxj7"
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.106147    4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/4c640fe8-4583-4162-949d-4508edaca274-ring-data-devices\") pod \"4c640fe8-4583-4162-949d-4508edaca274\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") "
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.106251    4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/4c640fe8-4583-4162-949d-4508edaca274-swiftconf\") pod \"4c640fe8-4583-4162-949d-4508edaca274\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") "
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.106400    4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/4c640fe8-4583-4162-949d-4508edaca274-dispersionconf\") pod \"4c640fe8-4583-4162-949d-4508edaca274\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") "
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.106447    4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9s5rh\" (UniqueName: \"kubernetes.io/projected/4c640fe8-4583-4162-949d-4508edaca274-kube-api-access-9s5rh\") pod \"4c640fe8-4583-4162-949d-4508edaca274\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") "
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.106549    4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c640fe8-4583-4162-949d-4508edaca274-combined-ca-bundle\") pod \"4c640fe8-4583-4162-949d-4508edaca274\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") "
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.106630    4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c640fe8-4583-4162-949d-4508edaca274-scripts\") pod \"4c640fe8-4583-4162-949d-4508edaca274\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") "
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.106663    4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/4c640fe8-4583-4162-949d-4508edaca274-etc-swift\") pod \"4c640fe8-4583-4162-949d-4508edaca274\" (UID: \"4c640fe8-4583-4162-949d-4508edaca274\") "
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.107766    4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c640fe8-4583-4162-949d-4508edaca274-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "4c640fe8-4583-4162-949d-4508edaca274" (UID: "4c640fe8-4583-4162-949d-4508edaca274"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.108039    4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c640fe8-4583-4162-949d-4508edaca274-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "4c640fe8-4583-4162-949d-4508edaca274" (UID: "4c640fe8-4583-4162-949d-4508edaca274"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.108687    4929 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/4c640fe8-4583-4162-949d-4508edaca274-etc-swift\") on node \"crc\" DevicePath \"\""
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.108706    4929 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/4c640fe8-4583-4162-949d-4508edaca274-ring-data-devices\") on node \"crc\" DevicePath \"\""
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.113448    4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c640fe8-4583-4162-949d-4508edaca274-kube-api-access-9s5rh" (OuterVolumeSpecName: "kube-api-access-9s5rh") pod "4c640fe8-4583-4162-949d-4508edaca274" (UID: "4c640fe8-4583-4162-949d-4508edaca274"). InnerVolumeSpecName "kube-api-access-9s5rh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.115959    4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c640fe8-4583-4162-949d-4508edaca274-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "4c640fe8-4583-4162-949d-4508edaca274" (UID: "4c640fe8-4583-4162-949d-4508edaca274"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.131618    4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c640fe8-4583-4162-949d-4508edaca274-scripts" (OuterVolumeSpecName: "scripts") pod "4c640fe8-4583-4162-949d-4508edaca274" (UID: "4c640fe8-4583-4162-949d-4508edaca274"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.140196    4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c640fe8-4583-4162-949d-4508edaca274-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c640fe8-4583-4162-949d-4508edaca274" (UID: "4c640fe8-4583-4162-949d-4508edaca274"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.141388    4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c640fe8-4583-4162-949d-4508edaca274-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "4c640fe8-4583-4162-949d-4508edaca274" (UID: "4c640fe8-4583-4162-949d-4508edaca274"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.210355    4929 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/4c640fe8-4583-4162-949d-4508edaca274-dispersionconf\") on node \"crc\" DevicePath \"\""
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.210586    4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9s5rh\" (UniqueName: \"kubernetes.io/projected/4c640fe8-4583-4162-949d-4508edaca274-kube-api-access-9s5rh\") on node \"crc\" DevicePath \"\""
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.210596    4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c640fe8-4583-4162-949d-4508edaca274-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.210604    4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c640fe8-4583-4162-949d-4508edaca274-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.210613    4929 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/4c640fe8-4583-4162-949d-4508edaca274-swiftconf\") on node \"crc\" DevicePath \"\""
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.527870    4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-bbxj7" event={"ID":"4c640fe8-4583-4162-949d-4508edaca274","Type":"ContainerDied","Data":"6ed9f0e7e535cb69c4343758331555fa33c3390b10ef0ad33f51647052d52628"}
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.527925    4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6ed9f0e7e535cb69c4343758331555fa33c3390b10ef0ad33f51647052d52628"
Nov 22 07:39:57 crc kubenswrapper[4929]: I1122 07:39:57.528576    4929 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/swift-ring-rebalance-bbxj7" Nov 22 07:39:58 crc kubenswrapper[4929]: I1122 07:39:58.707239 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-cv5dp" podUID="91c4ac46-cc90-4399-9ef0-45c4b010ff70" containerName="registry-server" probeResult="failure" output=< Nov 22 07:39:58 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 07:39:58 crc kubenswrapper[4929]: > Nov 22 07:39:59 crc kubenswrapper[4929]: I1122 07:39:59.616194 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:00 crc kubenswrapper[4929]: I1122 07:40:00.375831 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 22 07:40:00 crc kubenswrapper[4929]: I1122 07:40:00.447070 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="6a23e882-732c-416e-bfc5-c91517389f64" containerName="galera" probeResult="failure" output=< Nov 22 07:40:00 crc kubenswrapper[4929]: wsrep_local_state_comment (Joined) differs from Synced Nov 22 07:40:00 crc kubenswrapper[4929]: > Nov 22 07:40:01 crc kubenswrapper[4929]: I1122 07:40:01.963796 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bhx9f" Nov 22 07:40:01 crc kubenswrapper[4929]: I1122 07:40:01.964103 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bhx9f" Nov 22 07:40:02 crc kubenswrapper[4929]: I1122 07:40:02.009325 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bhx9f" Nov 22 07:40:02 crc kubenswrapper[4929]: I1122 07:40:02.619088 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bhx9f" Nov 22 07:40:02 crc kubenswrapper[4929]: I1122 07:40:02.699516 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bhx9f"] Nov 22 07:40:02 crc kubenswrapper[4929]: I1122 07:40:02.741776 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sw9px"] Nov 22 07:40:02 crc kubenswrapper[4929]: I1122 07:40:02.742267 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-sw9px" podUID="29c338ed-e46c-47a4-bdfc-826204c08156" containerName="registry-server" containerID="cri-o://018932e68d5b6f86129db4cfd38c93d35763611cc74fff643b9430e3e3ee3955" gracePeriod=2 Nov 22 07:40:03 crc kubenswrapper[4929]: I1122 07:40:03.583024 4929 generic.go:334] "Generic (PLEG): container finished" podID="29c338ed-e46c-47a4-bdfc-826204c08156" containerID="018932e68d5b6f86129db4cfd38c93d35763611cc74fff643b9430e3e3ee3955" exitCode=0 Nov 22 07:40:03 crc kubenswrapper[4929]: I1122 07:40:03.583131 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sw9px" event={"ID":"29c338ed-e46c-47a4-bdfc-826204c08156","Type":"ContainerDied","Data":"018932e68d5b6f86129db4cfd38c93d35763611cc74fff643b9430e3e3ee3955"} Nov 22 07:40:03 crc kubenswrapper[4929]: E1122 07:40:03.973756 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
018932e68d5b6f86129db4cfd38c93d35763611cc74fff643b9430e3e3ee3955 is running failed: container process not found" containerID="018932e68d5b6f86129db4cfd38c93d35763611cc74fff643b9430e3e3ee3955" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 07:40:03 crc kubenswrapper[4929]: E1122 07:40:03.974770 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 018932e68d5b6f86129db4cfd38c93d35763611cc74fff643b9430e3e3ee3955 is running failed: container process not found" containerID="018932e68d5b6f86129db4cfd38c93d35763611cc74fff643b9430e3e3ee3955" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 07:40:03 crc kubenswrapper[4929]: E1122 07:40:03.975176 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 018932e68d5b6f86129db4cfd38c93d35763611cc74fff643b9430e3e3ee3955 is running failed: container process not found" containerID="018932e68d5b6f86129db4cfd38c93d35763611cc74fff643b9430e3e3ee3955" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 07:40:03 crc kubenswrapper[4929]: E1122 07:40:03.975204 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 018932e68d5b6f86129db4cfd38c93d35763611cc74fff643b9430e3e3ee3955 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/community-operators-sw9px" podUID="29c338ed-e46c-47a4-bdfc-826204c08156" containerName="registry-server" Nov 22 07:40:03 crc kubenswrapper[4929]: I1122 07:40:03.986541 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sw9px" Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.119930 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-n8qwb" Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.119972 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-n8qwb" Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.125342 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29c338ed-e46c-47a4-bdfc-826204c08156-utilities\") pod \"29c338ed-e46c-47a4-bdfc-826204c08156\" (UID: \"29c338ed-e46c-47a4-bdfc-826204c08156\") " Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.125406 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29c338ed-e46c-47a4-bdfc-826204c08156-catalog-content\") pod \"29c338ed-e46c-47a4-bdfc-826204c08156\" (UID: \"29c338ed-e46c-47a4-bdfc-826204c08156\") " Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.125519 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mjf7h\" (UniqueName: \"kubernetes.io/projected/29c338ed-e46c-47a4-bdfc-826204c08156-kube-api-access-mjf7h\") pod \"29c338ed-e46c-47a4-bdfc-826204c08156\" (UID: \"29c338ed-e46c-47a4-bdfc-826204c08156\") " Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.126041 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29c338ed-e46c-47a4-bdfc-826204c08156-utilities" (OuterVolumeSpecName: "utilities") pod "29c338ed-e46c-47a4-bdfc-826204c08156" (UID: 
"29c338ed-e46c-47a4-bdfc-826204c08156"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.126782 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29c338ed-e46c-47a4-bdfc-826204c08156-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.133882 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29c338ed-e46c-47a4-bdfc-826204c08156-kube-api-access-mjf7h" (OuterVolumeSpecName: "kube-api-access-mjf7h") pod "29c338ed-e46c-47a4-bdfc-826204c08156" (UID: "29c338ed-e46c-47a4-bdfc-826204c08156"). InnerVolumeSpecName "kube-api-access-mjf7h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.172343 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29c338ed-e46c-47a4-bdfc-826204c08156-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "29c338ed-e46c-47a4-bdfc-826204c08156" (UID: "29c338ed-e46c-47a4-bdfc-826204c08156"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.186913 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-n8qwb" Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.229273 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29c338ed-e46c-47a4-bdfc-826204c08156-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.229305 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mjf7h\" (UniqueName: \"kubernetes.io/projected/29c338ed-e46c-47a4-bdfc-826204c08156-kube-api-access-mjf7h\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.316902 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zv95t" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" probeResult="failure" output=< Nov 22 07:40:04 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 07:40:04 crc kubenswrapper[4929]: > Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.458016 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.549308 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2" containerName="galera" probeResult="failure" output=< Nov 22 07:40:04 crc kubenswrapper[4929]: wsrep_local_state_comment (Joined) differs from Synced Nov 22 07:40:04 crc kubenswrapper[4929]: > Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.595416 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-sw9px" event={"ID":"29c338ed-e46c-47a4-bdfc-826204c08156","Type":"ContainerDied","Data":"e9570e9079cec9aa29e66874fd2bff562e3656eeacb2b3f9be6127beb352943d"} Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.595498 4929 scope.go:117] "RemoveContainer" containerID="018932e68d5b6f86129db4cfd38c93d35763611cc74fff643b9430e3e3ee3955" Nov 22 07:40:04 crc 
kubenswrapper[4929]: I1122 07:40:04.595503 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-sw9px" Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.616608 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.617850 4929 scope.go:117] "RemoveContainer" containerID="01da40ba0a21ed092a8532abdd6c57e40fc13ef453e926f1a32a3fdb752acad9" Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.618808 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.632803 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-sw9px"] Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.648885 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-sw9px"] Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.671887 4929 scope.go:117] "RemoveContainer" containerID="f2c70cab1114248f2e7f8cc461972afe38a0c74ffb623332951a66b7044e75e0" Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.683714 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-n8qwb" Nov 22 07:40:04 crc kubenswrapper[4929]: I1122 07:40:04.947359 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" Nov 22 07:40:04 crc kubenswrapper[4929]: E1122 07:40:04.947604 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:40:05 crc kubenswrapper[4929]: I1122 07:40:05.609564 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:05 crc kubenswrapper[4929]: I1122 07:40:05.957897 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29c338ed-e46c-47a4-bdfc-826204c08156" path="/var/lib/kubelet/pods/29c338ed-e46c-47a4-bdfc-826204c08156/volumes" Nov 22 07:40:06 crc kubenswrapper[4929]: I1122 07:40:06.451137 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-n8qwb"] Nov 22 07:40:06 crc kubenswrapper[4929]: I1122 07:40:06.615131 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-n8qwb" podUID="f8b580fe-2477-455b-bdaa-b59a1d93c986" containerName="registry-server" containerID="cri-o://f82888111ea6ac75671f38230813e100c7dc0d6f4f423ec147dfdc525209f9f0" gracePeriod=2 Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.402196 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.581990 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-n8qwb" Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.624709 4929 generic.go:334] "Generic (PLEG): container finished" podID="f8b580fe-2477-455b-bdaa-b59a1d93c986" containerID="f82888111ea6ac75671f38230813e100c7dc0d6f4f423ec147dfdc525209f9f0" exitCode=0 Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.624779 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-n8qwb" Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.624817 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n8qwb" event={"ID":"f8b580fe-2477-455b-bdaa-b59a1d93c986","Type":"ContainerDied","Data":"f82888111ea6ac75671f38230813e100c7dc0d6f4f423ec147dfdc525209f9f0"} Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.624884 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-n8qwb" event={"ID":"f8b580fe-2477-455b-bdaa-b59a1d93c986","Type":"ContainerDied","Data":"4af8d42be7ade4e495d8c3739790eff7d665ab2772650c09eb428bebdd475171"} Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.624907 4929 scope.go:117] "RemoveContainer" containerID="f82888111ea6ac75671f38230813e100c7dc0d6f4f423ec147dfdc525209f9f0" Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.625391 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="fad488a3-81d2-42c9-9140-9ca981927e13" containerName="config-reloader" containerID="cri-o://68b98099fc432be4259fb71ff85ba2185f0ceeac47d7364cb0ab58369a1fe9b8" gracePeriod=600 Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.625461 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="fad488a3-81d2-42c9-9140-9ca981927e13" containerName="thanos-sidecar" containerID="cri-o://78076513f20aaa41e8620d83addfa9d897a09eccbd856937cebb6dde40c19b10" gracePeriod=600 Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.625474 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="fad488a3-81d2-42c9-9140-9ca981927e13" containerName="prometheus" containerID="cri-o://e9b17618afb0793aa01abffd070b9e212c61420388a7c9a9773f5655d88a368a" gracePeriod=600 Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.650927 4929 scope.go:117] "RemoveContainer" containerID="e14ff14610b57c661819d4fcf25a1be9e62291e8d798b4b704db251e3f4c4886" Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.669104 4929 scope.go:117] "RemoveContainer" containerID="f43c0cfa78d03908bdc7c2b7ab8476cc34ae24563f52b8f8efe0f13705c2edc7" Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.687239 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8b580fe-2477-455b-bdaa-b59a1d93c986-catalog-content\") pod \"f8b580fe-2477-455b-bdaa-b59a1d93c986\" (UID: \"f8b580fe-2477-455b-bdaa-b59a1d93c986\") " Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.687317 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8b580fe-2477-455b-bdaa-b59a1d93c986-utilities\") pod \"f8b580fe-2477-455b-bdaa-b59a1d93c986\" (UID: \"f8b580fe-2477-455b-bdaa-b59a1d93c986\") " Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 
07:40:07.687375 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-84lw7\" (UniqueName: \"kubernetes.io/projected/f8b580fe-2477-455b-bdaa-b59a1d93c986-kube-api-access-84lw7\") pod \"f8b580fe-2477-455b-bdaa-b59a1d93c986\" (UID: \"f8b580fe-2477-455b-bdaa-b59a1d93c986\") " Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.692449 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8b580fe-2477-455b-bdaa-b59a1d93c986-utilities" (OuterVolumeSpecName: "utilities") pod "f8b580fe-2477-455b-bdaa-b59a1d93c986" (UID: "f8b580fe-2477-455b-bdaa-b59a1d93c986"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.695863 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8b580fe-2477-455b-bdaa-b59a1d93c986-kube-api-access-84lw7" (OuterVolumeSpecName: "kube-api-access-84lw7") pod "f8b580fe-2477-455b-bdaa-b59a1d93c986" (UID: "f8b580fe-2477-455b-bdaa-b59a1d93c986"). InnerVolumeSpecName "kube-api-access-84lw7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.695932 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-cv5dp" Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.741747 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8b580fe-2477-455b-bdaa-b59a1d93c986-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f8b580fe-2477-455b-bdaa-b59a1d93c986" (UID: "f8b580fe-2477-455b-bdaa-b59a1d93c986"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.754285 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-cv5dp" Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.793727 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8b580fe-2477-455b-bdaa-b59a1d93c986-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.793846 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8b580fe-2477-455b-bdaa-b59a1d93c986-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.793913 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-84lw7\" (UniqueName: \"kubernetes.io/projected/f8b580fe-2477-455b-bdaa-b59a1d93c986-kube-api-access-84lw7\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.847396 4929 scope.go:117] "RemoveContainer" containerID="f82888111ea6ac75671f38230813e100c7dc0d6f4f423ec147dfdc525209f9f0" Nov 22 07:40:07 crc kubenswrapper[4929]: E1122 07:40:07.848843 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f82888111ea6ac75671f38230813e100c7dc0d6f4f423ec147dfdc525209f9f0\": container with ID starting with f82888111ea6ac75671f38230813e100c7dc0d6f4f423ec147dfdc525209f9f0 not found: ID does not exist" containerID="f82888111ea6ac75671f38230813e100c7dc0d6f4f423ec147dfdc525209f9f0" Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.848870 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f82888111ea6ac75671f38230813e100c7dc0d6f4f423ec147dfdc525209f9f0"} err="failed to get container status \"f82888111ea6ac75671f38230813e100c7dc0d6f4f423ec147dfdc525209f9f0\": rpc error: code = NotFound desc = could not find container \"f82888111ea6ac75671f38230813e100c7dc0d6f4f423ec147dfdc525209f9f0\": container with ID starting with f82888111ea6ac75671f38230813e100c7dc0d6f4f423ec147dfdc525209f9f0 not found: ID does not exist" Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.848893 4929 scope.go:117] "RemoveContainer" containerID="e14ff14610b57c661819d4fcf25a1be9e62291e8d798b4b704db251e3f4c4886" Nov 22 07:40:07 crc kubenswrapper[4929]: E1122 07:40:07.849722 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e14ff14610b57c661819d4fcf25a1be9e62291e8d798b4b704db251e3f4c4886\": container with ID starting with e14ff14610b57c661819d4fcf25a1be9e62291e8d798b4b704db251e3f4c4886 not found: ID does not exist" containerID="e14ff14610b57c661819d4fcf25a1be9e62291e8d798b4b704db251e3f4c4886" Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.849751 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e14ff14610b57c661819d4fcf25a1be9e62291e8d798b4b704db251e3f4c4886"} err="failed to get container status \"e14ff14610b57c661819d4fcf25a1be9e62291e8d798b4b704db251e3f4c4886\": rpc error: code = NotFound desc = could not find container \"e14ff14610b57c661819d4fcf25a1be9e62291e8d798b4b704db251e3f4c4886\": container with ID starting with e14ff14610b57c661819d4fcf25a1be9e62291e8d798b4b704db251e3f4c4886 not found: ID does not exist" Nov 22 07:40:07 crc 
kubenswrapper[4929]: I1122 07:40:07.849765 4929 scope.go:117] "RemoveContainer" containerID="f43c0cfa78d03908bdc7c2b7ab8476cc34ae24563f52b8f8efe0f13705c2edc7" Nov 22 07:40:07 crc kubenswrapper[4929]: E1122 07:40:07.850075 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f43c0cfa78d03908bdc7c2b7ab8476cc34ae24563f52b8f8efe0f13705c2edc7\": container with ID starting with f43c0cfa78d03908bdc7c2b7ab8476cc34ae24563f52b8f8efe0f13705c2edc7 not found: ID does not exist" containerID="f43c0cfa78d03908bdc7c2b7ab8476cc34ae24563f52b8f8efe0f13705c2edc7" Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.850095 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f43c0cfa78d03908bdc7c2b7ab8476cc34ae24563f52b8f8efe0f13705c2edc7"} err="failed to get container status \"f43c0cfa78d03908bdc7c2b7ab8476cc34ae24563f52b8f8efe0f13705c2edc7\": rpc error: code = NotFound desc = could not find container \"f43c0cfa78d03908bdc7c2b7ab8476cc34ae24563f52b8f8efe0f13705c2edc7\": container with ID starting with f43c0cfa78d03908bdc7c2b7ab8476cc34ae24563f52b8f8efe0f13705c2edc7 not found: ID does not exist" Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.960482 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-n8qwb"] Nov 22 07:40:07 crc kubenswrapper[4929]: I1122 07:40:07.964809 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-n8qwb"] Nov 22 07:40:08 crc kubenswrapper[4929]: I1122 07:40:08.637425 4929 generic.go:334] "Generic (PLEG): container finished" podID="fad488a3-81d2-42c9-9140-9ca981927e13" containerID="78076513f20aaa41e8620d83addfa9d897a09eccbd856937cebb6dde40c19b10" exitCode=0 Nov 22 07:40:08 crc kubenswrapper[4929]: I1122 07:40:08.637472 4929 generic.go:334] "Generic (PLEG): container finished" podID="fad488a3-81d2-42c9-9140-9ca981927e13" containerID="e9b17618afb0793aa01abffd070b9e212c61420388a7c9a9773f5655d88a368a" exitCode=0 Nov 22 07:40:08 crc kubenswrapper[4929]: I1122 07:40:08.637489 4929 generic.go:334] "Generic (PLEG): container finished" podID="fad488a3-81d2-42c9-9140-9ca981927e13" containerID="68b98099fc432be4259fb71ff85ba2185f0ceeac47d7364cb0ab58369a1fe9b8" exitCode=0 Nov 22 07:40:08 crc kubenswrapper[4929]: I1122 07:40:08.637487 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fad488a3-81d2-42c9-9140-9ca981927e13","Type":"ContainerDied","Data":"78076513f20aaa41e8620d83addfa9d897a09eccbd856937cebb6dde40c19b10"} Nov 22 07:40:08 crc kubenswrapper[4929]: I1122 07:40:08.637592 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fad488a3-81d2-42c9-9140-9ca981927e13","Type":"ContainerDied","Data":"e9b17618afb0793aa01abffd070b9e212c61420388a7c9a9773f5655d88a368a"} Nov 22 07:40:08 crc kubenswrapper[4929]: I1122 07:40:08.637715 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fad488a3-81d2-42c9-9140-9ca981927e13","Type":"ContainerDied","Data":"68b98099fc432be4259fb71ff85ba2185f0ceeac47d7364cb0ab58369a1fe9b8"} Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.278145 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.419215 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/fad488a3-81d2-42c9-9140-9ca981927e13-config-out\") pod \"fad488a3-81d2-42c9-9140-9ca981927e13\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.419294 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/fad488a3-81d2-42c9-9140-9ca981927e13-tls-assets\") pod \"fad488a3-81d2-42c9-9140-9ca981927e13\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.419496 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\") pod \"fad488a3-81d2-42c9-9140-9ca981927e13\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.419598 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/fad488a3-81d2-42c9-9140-9ca981927e13-web-config\") pod \"fad488a3-81d2-42c9-9140-9ca981927e13\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.419637 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8w4x\" (UniqueName: \"kubernetes.io/projected/fad488a3-81d2-42c9-9140-9ca981927e13-kube-api-access-f8w4x\") pod \"fad488a3-81d2-42c9-9140-9ca981927e13\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.419665 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/fad488a3-81d2-42c9-9140-9ca981927e13-config\") pod \"fad488a3-81d2-42c9-9140-9ca981927e13\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.419694 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/fad488a3-81d2-42c9-9140-9ca981927e13-thanos-prometheus-http-client-file\") pod \"fad488a3-81d2-42c9-9140-9ca981927e13\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.419769 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/fad488a3-81d2-42c9-9140-9ca981927e13-prometheus-metric-storage-rulefiles-0\") pod \"fad488a3-81d2-42c9-9140-9ca981927e13\" (UID: \"fad488a3-81d2-42c9-9140-9ca981927e13\") " Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.421056 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fad488a3-81d2-42c9-9140-9ca981927e13-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "fad488a3-81d2-42c9-9140-9ca981927e13" (UID: "fad488a3-81d2-42c9-9140-9ca981927e13"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.425830 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fad488a3-81d2-42c9-9140-9ca981927e13-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "fad488a3-81d2-42c9-9140-9ca981927e13" (UID: "fad488a3-81d2-42c9-9140-9ca981927e13"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.426305 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fad488a3-81d2-42c9-9140-9ca981927e13-kube-api-access-f8w4x" (OuterVolumeSpecName: "kube-api-access-f8w4x") pod "fad488a3-81d2-42c9-9140-9ca981927e13" (UID: "fad488a3-81d2-42c9-9140-9ca981927e13"). InnerVolumeSpecName "kube-api-access-f8w4x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.427155 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fad488a3-81d2-42c9-9140-9ca981927e13-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "fad488a3-81d2-42c9-9140-9ca981927e13" (UID: "fad488a3-81d2-42c9-9140-9ca981927e13"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.427415 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fad488a3-81d2-42c9-9140-9ca981927e13-config-out" (OuterVolumeSpecName: "config-out") pod "fad488a3-81d2-42c9-9140-9ca981927e13" (UID: "fad488a3-81d2-42c9-9140-9ca981927e13"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.429599 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fad488a3-81d2-42c9-9140-9ca981927e13-config" (OuterVolumeSpecName: "config") pod "fad488a3-81d2-42c9-9140-9ca981927e13" (UID: "fad488a3-81d2-42c9-9140-9ca981927e13"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.447809 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fad488a3-81d2-42c9-9140-9ca981927e13-web-config" (OuterVolumeSpecName: "web-config") pod "fad488a3-81d2-42c9-9140-9ca981927e13" (UID: "fad488a3-81d2-42c9-9140-9ca981927e13"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.462979 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "fad488a3-81d2-42c9-9140-9ca981927e13" (UID: "fad488a3-81d2-42c9-9140-9ca981927e13"). InnerVolumeSpecName "pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.521707 4929 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/fad488a3-81d2-42c9-9140-9ca981927e13-web-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.521750 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8w4x\" (UniqueName: \"kubernetes.io/projected/fad488a3-81d2-42c9-9140-9ca981927e13-kube-api-access-f8w4x\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.521763 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/fad488a3-81d2-42c9-9140-9ca981927e13-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.521772 4929 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/fad488a3-81d2-42c9-9140-9ca981927e13-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.521784 4929 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/fad488a3-81d2-42c9-9140-9ca981927e13-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.521793 4929 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/fad488a3-81d2-42c9-9140-9ca981927e13-config-out\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.521802 4929 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/fad488a3-81d2-42c9-9140-9ca981927e13-tls-assets\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.521835 4929 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\") on node \"crc\" " Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.544652 4929 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.544848 4929 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676") on node "crc" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.623670 4929 reconciler_common.go:293] "Volume detached for volume \"pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.649306 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fad488a3-81d2-42c9-9140-9ca981927e13","Type":"ContainerDied","Data":"b5e914878d646a386cf3afe950d9573e0470e916e2e8f13a7dc9fbfb0933d6b1"} Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.649378 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.649842 4929 scope.go:117] "RemoveContainer" containerID="78076513f20aaa41e8620d83addfa9d897a09eccbd856937cebb6dde40c19b10" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.672470 4929 scope.go:117] "RemoveContainer" containerID="e9b17618afb0793aa01abffd070b9e212c61420388a7c9a9773f5655d88a368a" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.681835 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.689362 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.689942 4929 scope.go:117] "RemoveContainer" containerID="68b98099fc432be4259fb71ff85ba2185f0ceeac47d7364cb0ab58369a1fe9b8" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.708167 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 22 07:40:09 crc kubenswrapper[4929]: E1122 07:40:09.708669 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fad488a3-81d2-42c9-9140-9ca981927e13" containerName="thanos-sidecar" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.708696 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="fad488a3-81d2-42c9-9140-9ca981927e13" containerName="thanos-sidecar" Nov 22 07:40:09 crc kubenswrapper[4929]: E1122 07:40:09.708713 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fad488a3-81d2-42c9-9140-9ca981927e13" containerName="init-config-reloader" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.708724 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="fad488a3-81d2-42c9-9140-9ca981927e13" containerName="init-config-reloader" Nov 22 07:40:09 crc kubenswrapper[4929]: E1122 07:40:09.708739 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="612a3816-5128-496e-b640-63a90fa7dea2" containerName="ovn-config" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.708747 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="612a3816-5128-496e-b640-63a90fa7dea2" containerName="ovn-config" Nov 22 07:40:09 crc kubenswrapper[4929]: E1122 07:40:09.708757 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29c338ed-e46c-47a4-bdfc-826204c08156" containerName="registry-server" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.708764 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="29c338ed-e46c-47a4-bdfc-826204c08156" containerName="registry-server" Nov 22 07:40:09 crc kubenswrapper[4929]: E1122 07:40:09.708778 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fad488a3-81d2-42c9-9140-9ca981927e13" containerName="prometheus" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.708786 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="fad488a3-81d2-42c9-9140-9ca981927e13" containerName="prometheus" Nov 22 07:40:09 crc kubenswrapper[4929]: E1122 07:40:09.708800 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29c338ed-e46c-47a4-bdfc-826204c08156" containerName="extract-utilities" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.708808 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="29c338ed-e46c-47a4-bdfc-826204c08156" containerName="extract-utilities" Nov 22 07:40:09 crc kubenswrapper[4929]: E1122 07:40:09.708822 4929 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="f8b580fe-2477-455b-bdaa-b59a1d93c986" containerName="registry-server" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.708829 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8b580fe-2477-455b-bdaa-b59a1d93c986" containerName="registry-server" Nov 22 07:40:09 crc kubenswrapper[4929]: E1122 07:40:09.708845 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29c338ed-e46c-47a4-bdfc-826204c08156" containerName="extract-content" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.708852 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="29c338ed-e46c-47a4-bdfc-826204c08156" containerName="extract-content" Nov 22 07:40:09 crc kubenswrapper[4929]: E1122 07:40:09.708865 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8b580fe-2477-455b-bdaa-b59a1d93c986" containerName="extract-content" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.708873 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8b580fe-2477-455b-bdaa-b59a1d93c986" containerName="extract-content" Nov 22 07:40:09 crc kubenswrapper[4929]: E1122 07:40:09.708888 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c640fe8-4583-4162-949d-4508edaca274" containerName="swift-ring-rebalance" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.708895 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c640fe8-4583-4162-949d-4508edaca274" containerName="swift-ring-rebalance" Nov 22 07:40:09 crc kubenswrapper[4929]: E1122 07:40:09.708906 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8b580fe-2477-455b-bdaa-b59a1d93c986" containerName="extract-utilities" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.708914 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8b580fe-2477-455b-bdaa-b59a1d93c986" containerName="extract-utilities" Nov 22 07:40:09 crc kubenswrapper[4929]: E1122 07:40:09.708938 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fad488a3-81d2-42c9-9140-9ca981927e13" containerName="config-reloader" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.708945 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="fad488a3-81d2-42c9-9140-9ca981927e13" containerName="config-reloader" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.709121 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="fad488a3-81d2-42c9-9140-9ca981927e13" containerName="thanos-sidecar" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.709136 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="fad488a3-81d2-42c9-9140-9ca981927e13" containerName="config-reloader" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.709150 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="fad488a3-81d2-42c9-9140-9ca981927e13" containerName="prometheus" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.709162 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c640fe8-4583-4162-949d-4508edaca274" containerName="swift-ring-rebalance" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.709176 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="29c338ed-e46c-47a4-bdfc-826204c08156" containerName="registry-server" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.709187 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="612a3816-5128-496e-b640-63a90fa7dea2" containerName="ovn-config" Nov 22 07:40:09 crc 
kubenswrapper[4929]: I1122 07:40:09.709200 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8b580fe-2477-455b-bdaa-b59a1d93c986" containerName="registry-server" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.710162 4929 scope.go:117] "RemoveContainer" containerID="abdb302fcdf4f2912c2350f0344010bb5907017916e3de85bb61deb9d8084109" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.712341 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.717162 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.717411 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-r6m55" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.717415 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.717468 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.717523 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.717574 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.739188 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.745345 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.827050 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.827142 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.827176 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.827293 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/secret/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-config\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.827319 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.827378 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.827442 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkhgb\" (UniqueName: \"kubernetes.io/projected/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-kube-api-access-qkhgb\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.827692 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.827757 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.827826 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.827886 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.928919 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: 
\"kubernetes.io/secret/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.928981 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.929011 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.929032 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.929062 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.929098 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-config\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.929124 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.929299 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.929326 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkhgb\" (UniqueName: \"kubernetes.io/projected/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-kube-api-access-qkhgb\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " 
pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.929373 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.929406 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.929892 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.933606 4929 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.933649 4929 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/26190b449a803e3db988a49e35880969bbb223ecf15515b6482b10e2b9da6530/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.934188 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.934198 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.934636 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.934738 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/secret/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-config\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.936247 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.936467 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.936568 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.937421 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.946171 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkhgb\" (UniqueName: \"kubernetes.io/projected/fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4-kube-api-access-qkhgb\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.956123 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8b580fe-2477-455b-bdaa-b59a1d93c986" path="/var/lib/kubelet/pods/f8b580fe-2477-455b-bdaa-b59a1d93c986/volumes" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.957117 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fad488a3-81d2-42c9-9140-9ca981927e13" path="/var/lib/kubelet/pods/fad488a3-81d2-42c9-9140-9ca981927e13/volumes" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.972164 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 22 07:40:09 crc kubenswrapper[4929]: I1122 07:40:09.982428 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f4cf4a4a-57a7-4c8b-a981-66432e65a676\") pod \"prometheus-metric-storage-0\" (UID: \"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4\") " pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:10 crc kubenswrapper[4929]: I1122 07:40:10.062181 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:10 crc kubenswrapper[4929]: I1122 07:40:10.485617 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 22 07:40:10 crc kubenswrapper[4929]: I1122 07:40:10.651266 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cv5dp"] Nov 22 07:40:10 crc kubenswrapper[4929]: I1122 07:40:10.651842 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-cv5dp" podUID="91c4ac46-cc90-4399-9ef0-45c4b010ff70" containerName="registry-server" containerID="cri-o://3e7d9920c9b3debca85ab0512edf6f13b85ecb1ea466aba46ecd872ca2118d71" gracePeriod=2 Nov 22 07:40:10 crc kubenswrapper[4929]: I1122 07:40:10.665603 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4","Type":"ContainerStarted","Data":"10faa57c059db79859d990588627c309637d432083c71a6bfda4e9d07e3737b1"} Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.315741 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-b7aa-account-create-6jf49"] Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.317100 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-b7aa-account-create-6jf49" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.319414 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.331752 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-b7aa-account-create-6jf49"] Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.357436 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8043acad-43a7-4549-b279-b9545f71283a-operator-scripts\") pod \"keystone-b7aa-account-create-6jf49\" (UID: \"8043acad-43a7-4549-b279-b9545f71283a\") " pod="openstack/keystone-b7aa-account-create-6jf49" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.357728 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kc29j\" (UniqueName: \"kubernetes.io/projected/8043acad-43a7-4549-b279-b9545f71283a-kube-api-access-kc29j\") pod \"keystone-b7aa-account-create-6jf49\" (UID: \"8043acad-43a7-4549-b279-b9545f71283a\") " pod="openstack/keystone-b7aa-account-create-6jf49" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.377732 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-f7jtj"] Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.379131 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-f7jtj" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.392987 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-f7jtj"] Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.416801 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.459579 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vltm8\" (UniqueName: \"kubernetes.io/projected/9b4b632b-b258-4862-aeda-4c06647d490f-kube-api-access-vltm8\") pod \"keystone-db-create-f7jtj\" (UID: \"9b4b632b-b258-4862-aeda-4c06647d490f\") " pod="openstack/keystone-db-create-f7jtj" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.460166 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8043acad-43a7-4549-b279-b9545f71283a-operator-scripts\") pod \"keystone-b7aa-account-create-6jf49\" (UID: \"8043acad-43a7-4549-b279-b9545f71283a\") " pod="openstack/keystone-b7aa-account-create-6jf49" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.460344 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kc29j\" (UniqueName: \"kubernetes.io/projected/8043acad-43a7-4549-b279-b9545f71283a-kube-api-access-kc29j\") pod \"keystone-b7aa-account-create-6jf49\" (UID: \"8043acad-43a7-4549-b279-b9545f71283a\") " pod="openstack/keystone-b7aa-account-create-6jf49" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.460765 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b4b632b-b258-4862-aeda-4c06647d490f-operator-scripts\") pod \"keystone-db-create-f7jtj\" (UID: \"9b4b632b-b258-4862-aeda-4c06647d490f\") " pod="openstack/keystone-db-create-f7jtj" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.460995 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8043acad-43a7-4549-b279-b9545f71283a-operator-scripts\") pod \"keystone-b7aa-account-create-6jf49\" (UID: \"8043acad-43a7-4549-b279-b9545f71283a\") " pod="openstack/keystone-b7aa-account-create-6jf49" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.483951 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kc29j\" (UniqueName: \"kubernetes.io/projected/8043acad-43a7-4549-b279-b9545f71283a-kube-api-access-kc29j\") pod \"keystone-b7aa-account-create-6jf49\" (UID: \"8043acad-43a7-4549-b279-b9545f71283a\") " pod="openstack/keystone-b7aa-account-create-6jf49" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.562378 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b4b632b-b258-4862-aeda-4c06647d490f-operator-scripts\") pod \"keystone-db-create-f7jtj\" (UID: \"9b4b632b-b258-4862-aeda-4c06647d490f\") " pod="openstack/keystone-db-create-f7jtj" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.562453 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vltm8\" (UniqueName: \"kubernetes.io/projected/9b4b632b-b258-4862-aeda-4c06647d490f-kube-api-access-vltm8\") pod \"keystone-db-create-f7jtj\" (UID: 
\"9b4b632b-b258-4862-aeda-4c06647d490f\") " pod="openstack/keystone-db-create-f7jtj" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.563108 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b4b632b-b258-4862-aeda-4c06647d490f-operator-scripts\") pod \"keystone-db-create-f7jtj\" (UID: \"9b4b632b-b258-4862-aeda-4c06647d490f\") " pod="openstack/keystone-db-create-f7jtj" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.581535 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vltm8\" (UniqueName: \"kubernetes.io/projected/9b4b632b-b258-4862-aeda-4c06647d490f-kube-api-access-vltm8\") pod \"keystone-db-create-f7jtj\" (UID: \"9b4b632b-b258-4862-aeda-4c06647d490f\") " pod="openstack/keystone-db-create-f7jtj" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.633776 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-b7aa-account-create-6jf49" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.635065 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-5t55t"] Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.636417 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-5t55t" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.641329 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-5t55t"] Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.660228 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-e669-account-create-phsc5"] Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.661473 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-e669-account-create-phsc5" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.662948 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.663951 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49469bee-1b64-40ca-a873-bf08fc285efa-operator-scripts\") pod \"placement-db-create-5t55t\" (UID: \"49469bee-1b64-40ca-a873-bf08fc285efa\") " pod="openstack/placement-db-create-5t55t" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.664091 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vwvz\" (UniqueName: \"kubernetes.io/projected/49469bee-1b64-40ca-a873-bf08fc285efa-kube-api-access-6vwvz\") pod \"placement-db-create-5t55t\" (UID: \"49469bee-1b64-40ca-a873-bf08fc285efa\") " pod="openstack/placement-db-create-5t55t" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.671036 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-e669-account-create-phsc5"] Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.687253 4929 generic.go:334] "Generic (PLEG): container finished" podID="91c4ac46-cc90-4399-9ef0-45c4b010ff70" containerID="3e7d9920c9b3debca85ab0512edf6f13b85ecb1ea466aba46ecd872ca2118d71" exitCode=0 Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.688513 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cv5dp" event={"ID":"91c4ac46-cc90-4399-9ef0-45c4b010ff70","Type":"ContainerDied","Data":"3e7d9920c9b3debca85ab0512edf6f13b85ecb1ea466aba46ecd872ca2118d71"} Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.688562 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cv5dp" event={"ID":"91c4ac46-cc90-4399-9ef0-45c4b010ff70","Type":"ContainerDied","Data":"31129277b064588896f59c55be51b0f42eefcb68adeb752b0f937dcdd95a0e04"} Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.688576 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="31129277b064588896f59c55be51b0f42eefcb68adeb752b0f937dcdd95a0e04" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.703914 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-f7jtj" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.766144 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vb95p\" (UniqueName: \"kubernetes.io/projected/757783e8-e867-426d-94e0-952fd124f60c-kube-api-access-vb95p\") pod \"placement-e669-account-create-phsc5\" (UID: \"757783e8-e867-426d-94e0-952fd124f60c\") " pod="openstack/placement-e669-account-create-phsc5" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.766193 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vwvz\" (UniqueName: \"kubernetes.io/projected/49469bee-1b64-40ca-a873-bf08fc285efa-kube-api-access-6vwvz\") pod \"placement-db-create-5t55t\" (UID: \"49469bee-1b64-40ca-a873-bf08fc285efa\") " pod="openstack/placement-db-create-5t55t" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.766297 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49469bee-1b64-40ca-a873-bf08fc285efa-operator-scripts\") pod \"placement-db-create-5t55t\" (UID: \"49469bee-1b64-40ca-a873-bf08fc285efa\") " pod="openstack/placement-db-create-5t55t" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.766464 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/757783e8-e867-426d-94e0-952fd124f60c-operator-scripts\") pod \"placement-e669-account-create-phsc5\" (UID: \"757783e8-e867-426d-94e0-952fd124f60c\") " pod="openstack/placement-e669-account-create-phsc5" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.767791 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49469bee-1b64-40ca-a873-bf08fc285efa-operator-scripts\") pod \"placement-db-create-5t55t\" (UID: \"49469bee-1b64-40ca-a873-bf08fc285efa\") " pod="openstack/placement-db-create-5t55t" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.781345 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cv5dp" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.867109 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91c4ac46-cc90-4399-9ef0-45c4b010ff70-utilities\") pod \"91c4ac46-cc90-4399-9ef0-45c4b010ff70\" (UID: \"91c4ac46-cc90-4399-9ef0-45c4b010ff70\") " Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.867627 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8k8tz\" (UniqueName: \"kubernetes.io/projected/91c4ac46-cc90-4399-9ef0-45c4b010ff70-kube-api-access-8k8tz\") pod \"91c4ac46-cc90-4399-9ef0-45c4b010ff70\" (UID: \"91c4ac46-cc90-4399-9ef0-45c4b010ff70\") " Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.867715 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91c4ac46-cc90-4399-9ef0-45c4b010ff70-catalog-content\") pod \"91c4ac46-cc90-4399-9ef0-45c4b010ff70\" (UID: \"91c4ac46-cc90-4399-9ef0-45c4b010ff70\") " Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.867875 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91c4ac46-cc90-4399-9ef0-45c4b010ff70-utilities" (OuterVolumeSpecName: "utilities") pod "91c4ac46-cc90-4399-9ef0-45c4b010ff70" (UID: "91c4ac46-cc90-4399-9ef0-45c4b010ff70"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.867969 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/757783e8-e867-426d-94e0-952fd124f60c-operator-scripts\") pod \"placement-e669-account-create-phsc5\" (UID: \"757783e8-e867-426d-94e0-952fd124f60c\") " pod="openstack/placement-e669-account-create-phsc5" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.868119 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vb95p\" (UniqueName: \"kubernetes.io/projected/757783e8-e867-426d-94e0-952fd124f60c-kube-api-access-vb95p\") pod \"placement-e669-account-create-phsc5\" (UID: \"757783e8-e867-426d-94e0-952fd124f60c\") " pod="openstack/placement-e669-account-create-phsc5" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.868305 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91c4ac46-cc90-4399-9ef0-45c4b010ff70-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.868619 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/757783e8-e867-426d-94e0-952fd124f60c-operator-scripts\") pod \"placement-e669-account-create-phsc5\" (UID: \"757783e8-e867-426d-94e0-952fd124f60c\") " pod="openstack/placement-e669-account-create-phsc5" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.893770 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91c4ac46-cc90-4399-9ef0-45c4b010ff70-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "91c4ac46-cc90-4399-9ef0-45c4b010ff70" (UID: "91c4ac46-cc90-4399-9ef0-45c4b010ff70"). InnerVolumeSpecName "catalog-content". 
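The reconciler_common.go and operation_generator.go entries above trace the kubelet volume manager walking each volume through VerifyControllerAttachedVolume -> MountVolume -> "MountVolume.SetUp succeeded" for pods being started (prometheus-metric-storage-0, keystone-db-create-f7jtj), and UnmountVolume -> "UnmountVolume.TearDown succeeded" -> "Volume detached" for the pod being deleted (redhat-marketplace-cv5dp). A minimal Go sketch of that desired-state-versus-actual-state reconcile pattern follows; every type and name in it is invented for illustration, and it is not the kubelet's actual implementation:

    // Hypothetical sketch of the reconcile pattern behind the
    // reconciler_common.go messages above. Not kubelet source.
    package main

    import "fmt"

    type volume struct{ name, pod string }

    type worlds struct {
        desired map[string]volume // volumes pods on this node want mounted
        actual  map[string]volume // volumes actually mounted right now
    }

    func (w *worlds) reconcile() {
        // Unmount first: anything mounted that no pod wants anymore.
        for key, v := range w.actual {
            if _, wanted := w.desired[key]; !wanted {
                fmt.Printf("UnmountVolume started for volume %q pod %q\n", v.name, v.pod)
                delete(w.actual, key) // stands in for TearDown + "Volume detached"
            }
        }
        // Then mount anything wanted but not yet in the actual world.
        for key, v := range w.desired {
            if _, mounted := w.actual[key]; !mounted {
                fmt.Printf("VerifyControllerAttachedVolume started for volume %q\n", v.name)
                fmt.Printf("MountVolume started for volume %q\n", v.name)
                w.actual[key] = v // stands in for "MountVolume.SetUp succeeded"
            }
        }
    }

    func main() {
        w := &worlds{
            desired: map[string]volume{"cfg": {"operator-scripts", "keystone-db-create-f7jtj"}},
            actual:  map[string]volume{"old": {"utilities", "redhat-marketplace-cv5dp"}},
        }
        w.reconcile()
    }

In the real kubelet the two worlds are populated from the pod manager and the mount table, and each operation runs asynchronously through an operation executor; the sketch collapses that to synchronous map updates to show only the ordering visible in the log.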
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.973044 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91c4ac46-cc90-4399-9ef0-45c4b010ff70-kube-api-access-8k8tz" (OuterVolumeSpecName: "kube-api-access-8k8tz") pod "91c4ac46-cc90-4399-9ef0-45c4b010ff70" (UID: "91c4ac46-cc90-4399-9ef0-45c4b010ff70"). InnerVolumeSpecName "kube-api-access-8k8tz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.973114 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91c4ac46-cc90-4399-9ef0-45c4b010ff70-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.973915 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vb95p\" (UniqueName: \"kubernetes.io/projected/757783e8-e867-426d-94e0-952fd124f60c-kube-api-access-vb95p\") pod \"placement-e669-account-create-phsc5\" (UID: \"757783e8-e867-426d-94e0-952fd124f60c\") " pod="openstack/placement-e669-account-create-phsc5" Nov 22 07:40:11 crc kubenswrapper[4929]: I1122 07:40:11.974478 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vwvz\" (UniqueName: \"kubernetes.io/projected/49469bee-1b64-40ca-a873-bf08fc285efa-kube-api-access-6vwvz\") pod \"placement-db-create-5t55t\" (UID: \"49469bee-1b64-40ca-a873-bf08fc285efa\") " pod="openstack/placement-db-create-5t55t" Nov 22 07:40:12 crc kubenswrapper[4929]: I1122 07:40:12.074339 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8k8tz\" (UniqueName: \"kubernetes.io/projected/91c4ac46-cc90-4399-9ef0-45c4b010ff70-kube-api-access-8k8tz\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:12 crc kubenswrapper[4929]: I1122 07:40:12.099561 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-5t55t" Nov 22 07:40:12 crc kubenswrapper[4929]: I1122 07:40:12.103299 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-b7aa-account-create-6jf49"] Nov 22 07:40:12 crc kubenswrapper[4929]: W1122 07:40:12.116402 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8043acad_43a7_4549_b279_b9545f71283a.slice/crio-4fdedc856fa5be5cbc2a94d0c04ace8bf88602dae56e355a71393a5a31706502 WatchSource:0}: Error finding container 4fdedc856fa5be5cbc2a94d0c04ace8bf88602dae56e355a71393a5a31706502: Status 404 returned error can't find the container with id 4fdedc856fa5be5cbc2a94d0c04ace8bf88602dae56e355a71393a5a31706502 Nov 22 07:40:12 crc kubenswrapper[4929]: I1122 07:40:12.128775 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-e669-account-create-phsc5" Nov 22 07:40:12 crc kubenswrapper[4929]: I1122 07:40:12.209064 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-f7jtj"] Nov 22 07:40:12 crc kubenswrapper[4929]: W1122 07:40:12.225474 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b4b632b_b258_4862_aeda_4c06647d490f.slice/crio-a7ea33a0f32e03bd90d91a00e772cf569b4dbd4164cba1c5ce9be9f03ac726d4 WatchSource:0}: Error finding container a7ea33a0f32e03bd90d91a00e772cf569b4dbd4164cba1c5ce9be9f03ac726d4: Status 404 returned error can't find the container with id a7ea33a0f32e03bd90d91a00e772cf569b4dbd4164cba1c5ce9be9f03ac726d4 Nov 22 07:40:12 crc kubenswrapper[4929]: I1122 07:40:12.328528 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-5t55t"] Nov 22 07:40:12 crc kubenswrapper[4929]: W1122 07:40:12.346171 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49469bee_1b64_40ca_a873_bf08fc285efa.slice/crio-762855af255f861b50f42571d980807060687835349098da288bd6d0ba30dc98 WatchSource:0}: Error finding container 762855af255f861b50f42571d980807060687835349098da288bd6d0ba30dc98: Status 404 returned error can't find the container with id 762855af255f861b50f42571d980807060687835349098da288bd6d0ba30dc98 Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:12.640400 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-e669-account-create-phsc5"] Nov 22 07:40:16 crc kubenswrapper[4929]: W1122 07:40:12.651050 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod757783e8_e867_426d_94e0_952fd124f60c.slice/crio-00704159e69d22d3eb7fbce2858e41aaf500a94e31977e68f2b3976effe8c198 WatchSource:0}: Error finding container 00704159e69d22d3eb7fbce2858e41aaf500a94e31977e68f2b3976effe8c198: Status 404 returned error can't find the container with id 00704159e69d22d3eb7fbce2858e41aaf500a94e31977e68f2b3976effe8c198 Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:12.699474 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-5t55t" event={"ID":"49469bee-1b64-40ca-a873-bf08fc285efa","Type":"ContainerStarted","Data":"762855af255f861b50f42571d980807060687835349098da288bd6d0ba30dc98"} Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:12.700778 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-b7aa-account-create-6jf49" event={"ID":"8043acad-43a7-4549-b279-b9545f71283a","Type":"ContainerStarted","Data":"4fdedc856fa5be5cbc2a94d0c04ace8bf88602dae56e355a71393a5a31706502"} Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:12.702397 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-f7jtj" event={"ID":"9b4b632b-b258-4862-aeda-4c06647d490f","Type":"ContainerStarted","Data":"d814fab54abebc0695b9ebfc9536715873cfb3bc9355b8855fb00f4bd8f7acad"} Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:12.702427 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-f7jtj" event={"ID":"9b4b632b-b258-4862-aeda-4c06647d490f","Type":"ContainerStarted","Data":"a7ea33a0f32e03bd90d91a00e772cf569b4dbd4164cba1c5ce9be9f03ac726d4"} Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:12.704883 4929 util.go:48] "No ready sandbox for pod 
can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cv5dp" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:12.706698 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-e669-account-create-phsc5" event={"ID":"757783e8-e867-426d-94e0-952fd124f60c","Type":"ContainerStarted","Data":"00704159e69d22d3eb7fbce2858e41aaf500a94e31977e68f2b3976effe8c198"} Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:12.988740 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cv5dp"] Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:12.994487 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-cv5dp"] Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.364242 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-create-xhj8d"] Nov 22 07:40:16 crc kubenswrapper[4929]: E1122 07:40:13.364653 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91c4ac46-cc90-4399-9ef0-45c4b010ff70" containerName="registry-server" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.364669 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="91c4ac46-cc90-4399-9ef0-45c4b010ff70" containerName="registry-server" Nov 22 07:40:16 crc kubenswrapper[4929]: E1122 07:40:13.364709 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91c4ac46-cc90-4399-9ef0-45c4b010ff70" containerName="extract-utilities" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.364719 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="91c4ac46-cc90-4399-9ef0-45c4b010ff70" containerName="extract-utilities" Nov 22 07:40:16 crc kubenswrapper[4929]: E1122 07:40:13.364734 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91c4ac46-cc90-4399-9ef0-45c4b010ff70" containerName="extract-content" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.364742 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="91c4ac46-cc90-4399-9ef0-45c4b010ff70" containerName="extract-content" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.380736 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="91c4ac46-cc90-4399-9ef0-45c4b010ff70" containerName="registry-server" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.381876 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-xhj8d" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.395572 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-xhj8d"] Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.460990 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-0fbc-account-create-xmjgc"] Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.462322 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-0fbc-account-create-xmjgc" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.464018 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-db-secret" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.479344 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-0fbc-account-create-xmjgc"] Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.506188 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjr52\" (UniqueName: \"kubernetes.io/projected/989d7793-3fc8-45d9-83dc-30f02a1e7876-kube-api-access-hjr52\") pod \"watcher-db-create-xhj8d\" (UID: \"989d7793-3fc8-45d9-83dc-30f02a1e7876\") " pod="openstack/watcher-db-create-xhj8d" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.506360 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/989d7793-3fc8-45d9-83dc-30f02a1e7876-operator-scripts\") pod \"watcher-db-create-xhj8d\" (UID: \"989d7793-3fc8-45d9-83dc-30f02a1e7876\") " pod="openstack/watcher-db-create-xhj8d" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.506414 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d10c475-c29a-43b7-8607-71679b6109eb-operator-scripts\") pod \"watcher-0fbc-account-create-xmjgc\" (UID: \"2d10c475-c29a-43b7-8607-71679b6109eb\") " pod="openstack/watcher-0fbc-account-create-xmjgc" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.506435 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9fwt\" (UniqueName: \"kubernetes.io/projected/2d10c475-c29a-43b7-8607-71679b6109eb-kube-api-access-r9fwt\") pod \"watcher-0fbc-account-create-xmjgc\" (UID: \"2d10c475-c29a-43b7-8607-71679b6109eb\") " pod="openstack/watcher-0fbc-account-create-xmjgc" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.607830 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9fwt\" (UniqueName: \"kubernetes.io/projected/2d10c475-c29a-43b7-8607-71679b6109eb-kube-api-access-r9fwt\") pod \"watcher-0fbc-account-create-xmjgc\" (UID: \"2d10c475-c29a-43b7-8607-71679b6109eb\") " pod="openstack/watcher-0fbc-account-create-xmjgc" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.608012 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjr52\" (UniqueName: \"kubernetes.io/projected/989d7793-3fc8-45d9-83dc-30f02a1e7876-kube-api-access-hjr52\") pod \"watcher-db-create-xhj8d\" (UID: \"989d7793-3fc8-45d9-83dc-30f02a1e7876\") " pod="openstack/watcher-db-create-xhj8d" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.608134 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/989d7793-3fc8-45d9-83dc-30f02a1e7876-operator-scripts\") pod \"watcher-db-create-xhj8d\" (UID: \"989d7793-3fc8-45d9-83dc-30f02a1e7876\") " pod="openstack/watcher-db-create-xhj8d" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.608196 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d10c475-c29a-43b7-8607-71679b6109eb-operator-scripts\") pod 
\"watcher-0fbc-account-create-xmjgc\" (UID: \"2d10c475-c29a-43b7-8607-71679b6109eb\") " pod="openstack/watcher-0fbc-account-create-xmjgc" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.609057 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d10c475-c29a-43b7-8607-71679b6109eb-operator-scripts\") pod \"watcher-0fbc-account-create-xmjgc\" (UID: \"2d10c475-c29a-43b7-8607-71679b6109eb\") " pod="openstack/watcher-0fbc-account-create-xmjgc" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.609058 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/989d7793-3fc8-45d9-83dc-30f02a1e7876-operator-scripts\") pod \"watcher-db-create-xhj8d\" (UID: \"989d7793-3fc8-45d9-83dc-30f02a1e7876\") " pod="openstack/watcher-db-create-xhj8d" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.633580 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9fwt\" (UniqueName: \"kubernetes.io/projected/2d10c475-c29a-43b7-8607-71679b6109eb-kube-api-access-r9fwt\") pod \"watcher-0fbc-account-create-xmjgc\" (UID: \"2d10c475-c29a-43b7-8607-71679b6109eb\") " pod="openstack/watcher-0fbc-account-create-xmjgc" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.636482 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjr52\" (UniqueName: \"kubernetes.io/projected/989d7793-3fc8-45d9-83dc-30f02a1e7876-kube-api-access-hjr52\") pod \"watcher-db-create-xhj8d\" (UID: \"989d7793-3fc8-45d9-83dc-30f02a1e7876\") " pod="openstack/watcher-db-create-xhj8d" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.708788 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-xhj8d" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.725116 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4","Type":"ContainerStarted","Data":"99d1638126f7d8c3f4efe9ff5b703fffdccd0e155db3d9aede5e34e096262216"} Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.727291 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-e669-account-create-phsc5" event={"ID":"757783e8-e867-426d-94e0-952fd124f60c","Type":"ContainerStarted","Data":"30cad07f2dd9f9365feecc12d701858460cfd0c45f0e781fdfc51deecb822163"} Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.731893 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-5t55t" event={"ID":"49469bee-1b64-40ca-a873-bf08fc285efa","Type":"ContainerStarted","Data":"b980a08b1ce359fe5972912ad25bffd1d5cc9683e1b6d780ac37576e9a24439f"} Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.736351 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-b7aa-account-create-6jf49" event={"ID":"8043acad-43a7-4549-b279-b9545f71283a","Type":"ContainerStarted","Data":"656d532293189756f2238e4e8348282eadcb49d1bf4f0e81b242df8a0f93f12f"} Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.778736 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-0fbc-account-create-xmjgc" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.781470 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-f7jtj" podStartSLOduration=2.781456665 podStartE2EDuration="2.781456665s" podCreationTimestamp="2025-11-22 07:40:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:40:13.77245333 +0000 UTC m=+1750.881907373" watchObservedRunningTime="2025-11-22 07:40:13.781456665 +0000 UTC m=+1750.890910698" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.799023 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-e669-account-create-phsc5" podStartSLOduration=2.799006824 podStartE2EDuration="2.799006824s" podCreationTimestamp="2025-11-22 07:40:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:40:13.786033499 +0000 UTC m=+1750.895487532" watchObservedRunningTime="2025-11-22 07:40:13.799006824 +0000 UTC m=+1750.908460827" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.810012 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-b7aa-account-create-6jf49" podStartSLOduration=2.809993824 podStartE2EDuration="2.809993824s" podCreationTimestamp="2025-11-22 07:40:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:40:13.79883752 +0000 UTC m=+1750.908291533" watchObservedRunningTime="2025-11-22 07:40:13.809993824 +0000 UTC m=+1750.919447837" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.822713 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-5t55t" podStartSLOduration=2.822697003 podStartE2EDuration="2.822697003s" podCreationTimestamp="2025-11-22 07:40:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:40:13.813237488 +0000 UTC m=+1750.922691521" watchObservedRunningTime="2025-11-22 07:40:13.822697003 +0000 UTC m=+1750.932151006" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:13.960292 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91c4ac46-cc90-4399-9ef0-45c4b010ff70" path="/var/lib/kubelet/pods/91c4ac46-cc90-4399-9ef0-45c4b010ff70/volumes" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:14.109535 4929 scope.go:117] "RemoveContainer" containerID="45fe7d2bb56386a921f77f472c5727cf1f9b0002470c0b94367283e159b4f98d" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:14.302558 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zv95t" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" probeResult="failure" output=< Nov 22 07:40:16 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 07:40:16 crc kubenswrapper[4929]: > Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:15.756034 4929 generic.go:334] "Generic (PLEG): container finished" podID="9b4b632b-b258-4862-aeda-4c06647d490f" containerID="d814fab54abebc0695b9ebfc9536715873cfb3bc9355b8855fb00f4bd8f7acad" exitCode=0 Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:15.756200 4929 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/keystone-db-create-f7jtj" event={"ID":"9b4b632b-b258-4862-aeda-4c06647d490f","Type":"ContainerDied","Data":"d814fab54abebc0695b9ebfc9536715873cfb3bc9355b8855fb00f4bd8f7acad"} Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:16.752097 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-0fbc-account-create-xmjgc"] Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:16.768198 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-xhj8d"] Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:16.783288 4929 generic.go:334] "Generic (PLEG): container finished" podID="757783e8-e867-426d-94e0-952fd124f60c" containerID="30cad07f2dd9f9365feecc12d701858460cfd0c45f0e781fdfc51deecb822163" exitCode=0 Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:16.783354 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-e669-account-create-phsc5" event={"ID":"757783e8-e867-426d-94e0-952fd124f60c","Type":"ContainerDied","Data":"30cad07f2dd9f9365feecc12d701858460cfd0c45f0e781fdfc51deecb822163"} Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:16.784900 4929 generic.go:334] "Generic (PLEG): container finished" podID="49469bee-1b64-40ca-a873-bf08fc285efa" containerID="b980a08b1ce359fe5972912ad25bffd1d5cc9683e1b6d780ac37576e9a24439f" exitCode=0 Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:16.784948 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-5t55t" event={"ID":"49469bee-1b64-40ca-a873-bf08fc285efa","Type":"ContainerDied","Data":"b980a08b1ce359fe5972912ad25bffd1d5cc9683e1b6d780ac37576e9a24439f"} Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:16.786931 4929 generic.go:334] "Generic (PLEG): container finished" podID="8043acad-43a7-4549-b279-b9545f71283a" containerID="656d532293189756f2238e4e8348282eadcb49d1bf4f0e81b242df8a0f93f12f" exitCode=0 Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:16.786995 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-b7aa-account-create-6jf49" event={"ID":"8043acad-43a7-4549-b279-b9545f71283a","Type":"ContainerDied","Data":"656d532293189756f2238e4e8348282eadcb49d1bf4f0e81b242df8a0f93f12f"} Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:16.843594 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-xh9z8"] Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:16.844995 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-xh9z8" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:16.849297 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-xh9z8"] Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:16.924361 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-618d-account-create-prpvg"] Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:16.925547 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-618d-account-create-prpvg" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:16.930707 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:16.943722 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-618d-account-create-prpvg"] Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:16.947457 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" Nov 22 07:40:16 crc kubenswrapper[4929]: E1122 07:40:16.947725 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:16.976429 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f-operator-scripts\") pod \"glance-db-create-xh9z8\" (UID: \"65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f\") " pod="openstack/glance-db-create-xh9z8" Nov 22 07:40:16 crc kubenswrapper[4929]: I1122 07:40:16.976559 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jxbq\" (UniqueName: \"kubernetes.io/projected/65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f-kube-api-access-4jxbq\") pod \"glance-db-create-xh9z8\" (UID: \"65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f\") " pod="openstack/glance-db-create-xh9z8" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.078844 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55rmr\" (UniqueName: \"kubernetes.io/projected/57adf361-307d-4f8c-817c-6dcb54a40b4c-kube-api-access-55rmr\") pod \"glance-618d-account-create-prpvg\" (UID: \"57adf361-307d-4f8c-817c-6dcb54a40b4c\") " pod="openstack/glance-618d-account-create-prpvg" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.079059 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jxbq\" (UniqueName: \"kubernetes.io/projected/65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f-kube-api-access-4jxbq\") pod \"glance-db-create-xh9z8\" (UID: \"65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f\") " pod="openstack/glance-db-create-xh9z8" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.079084 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57adf361-307d-4f8c-817c-6dcb54a40b4c-operator-scripts\") pod \"glance-618d-account-create-prpvg\" (UID: \"57adf361-307d-4f8c-817c-6dcb54a40b4c\") " pod="openstack/glance-618d-account-create-prpvg" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.079196 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f-operator-scripts\") pod \"glance-db-create-xh9z8\" (UID: \"65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f\") " pod="openstack/glance-db-create-xh9z8" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 
07:40:17.080851 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f-operator-scripts\") pod \"glance-db-create-xh9z8\" (UID: \"65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f\") " pod="openstack/glance-db-create-xh9z8" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.099428 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jxbq\" (UniqueName: \"kubernetes.io/projected/65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f-kube-api-access-4jxbq\") pod \"glance-db-create-xh9z8\" (UID: \"65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f\") " pod="openstack/glance-db-create-xh9z8" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.120664 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-f7jtj" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.181169 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55rmr\" (UniqueName: \"kubernetes.io/projected/57adf361-307d-4f8c-817c-6dcb54a40b4c-kube-api-access-55rmr\") pod \"glance-618d-account-create-prpvg\" (UID: \"57adf361-307d-4f8c-817c-6dcb54a40b4c\") " pod="openstack/glance-618d-account-create-prpvg" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.181250 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57adf361-307d-4f8c-817c-6dcb54a40b4c-operator-scripts\") pod \"glance-618d-account-create-prpvg\" (UID: \"57adf361-307d-4f8c-817c-6dcb54a40b4c\") " pod="openstack/glance-618d-account-create-prpvg" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.182090 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57adf361-307d-4f8c-817c-6dcb54a40b4c-operator-scripts\") pod \"glance-618d-account-create-prpvg\" (UID: \"57adf361-307d-4f8c-817c-6dcb54a40b4c\") " pod="openstack/glance-618d-account-create-prpvg" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.205710 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55rmr\" (UniqueName: \"kubernetes.io/projected/57adf361-307d-4f8c-817c-6dcb54a40b4c-kube-api-access-55rmr\") pod \"glance-618d-account-create-prpvg\" (UID: \"57adf361-307d-4f8c-817c-6dcb54a40b4c\") " pod="openstack/glance-618d-account-create-prpvg" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.282192 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vltm8\" (UniqueName: \"kubernetes.io/projected/9b4b632b-b258-4862-aeda-4c06647d490f-kube-api-access-vltm8\") pod \"9b4b632b-b258-4862-aeda-4c06647d490f\" (UID: \"9b4b632b-b258-4862-aeda-4c06647d490f\") " Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.282361 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b4b632b-b258-4862-aeda-4c06647d490f-operator-scripts\") pod \"9b4b632b-b258-4862-aeda-4c06647d490f\" (UID: \"9b4b632b-b258-4862-aeda-4c06647d490f\") " Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.283080 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b4b632b-b258-4862-aeda-4c06647d490f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9b4b632b-b258-4862-aeda-4c06647d490f" (UID: 
"9b4b632b-b258-4862-aeda-4c06647d490f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.285579 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b4b632b-b258-4862-aeda-4c06647d490f-kube-api-access-vltm8" (OuterVolumeSpecName: "kube-api-access-vltm8") pod "9b4b632b-b258-4862-aeda-4c06647d490f" (UID: "9b4b632b-b258-4862-aeda-4c06647d490f"). InnerVolumeSpecName "kube-api-access-vltm8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.384982 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vltm8\" (UniqueName: \"kubernetes.io/projected/9b4b632b-b258-4862-aeda-4c06647d490f-kube-api-access-vltm8\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.385016 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b4b632b-b258-4862-aeda-4c06647d490f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.387695 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-xh9z8" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.418007 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-618d-account-create-prpvg" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.804924 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-f7jtj" event={"ID":"9b4b632b-b258-4862-aeda-4c06647d490f","Type":"ContainerDied","Data":"a7ea33a0f32e03bd90d91a00e772cf569b4dbd4164cba1c5ce9be9f03ac726d4"} Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.805188 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a7ea33a0f32e03bd90d91a00e772cf569b4dbd4164cba1c5ce9be9f03ac726d4" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.805261 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-f7jtj" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.807195 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-xhj8d" event={"ID":"989d7793-3fc8-45d9-83dc-30f02a1e7876","Type":"ContainerStarted","Data":"c8831cd02094c059d64e2b0b73727f8a38e750042b399fae2589c9c1ef247f6e"} Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.807283 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-xhj8d" event={"ID":"989d7793-3fc8-45d9-83dc-30f02a1e7876","Type":"ContainerStarted","Data":"d0dc6734d7f1acdf87f79f3826f6531af74c0627c526f0bac5fe00060682a43a"} Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.808883 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-0fbc-account-create-xmjgc" event={"ID":"2d10c475-c29a-43b7-8607-71679b6109eb","Type":"ContainerStarted","Data":"dd0956e821e6f19d11fb5cbf2c6af0f22aa842ff6dbf890895e4fbf64181588e"} Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.808934 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-0fbc-account-create-xmjgc" event={"ID":"2d10c475-c29a-43b7-8607-71679b6109eb","Type":"ContainerStarted","Data":"a9ce14c65a07dbd7a4693a5bf54ca0e3df997e811e821978c28c02fc97c6eca0"} Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.829243 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-db-create-xhj8d" podStartSLOduration=4.829186986 podStartE2EDuration="4.829186986s" podCreationTimestamp="2025-11-22 07:40:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:40:17.823259032 +0000 UTC m=+1754.932713045" watchObservedRunningTime="2025-11-22 07:40:17.829186986 +0000 UTC m=+1754.938641039" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.848751 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-0fbc-account-create-xmjgc" podStartSLOduration=4.848729671 podStartE2EDuration="4.848729671s" podCreationTimestamp="2025-11-22 07:40:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:40:17.840736979 +0000 UTC m=+1754.950190992" watchObservedRunningTime="2025-11-22 07:40:17.848729671 +0000 UTC m=+1754.958183684" Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.868590 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-xh9z8"] Nov 22 07:40:17 crc kubenswrapper[4929]: W1122 07:40:17.871405 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod65cd3e57_02b6_46a6_a3bf_2fad9cf13a2f.slice/crio-07a2de11580a66a3983a9c151fcde70f4b714562751546a8dbd0a0e91480f258 WatchSource:0}: Error finding container 07a2de11580a66a3983a9c151fcde70f4b714562751546a8dbd0a0e91480f258: Status 404 returned error can't find the container with id 07a2de11580a66a3983a9c151fcde70f4b714562751546a8dbd0a0e91480f258 Nov 22 07:40:17 crc kubenswrapper[4929]: I1122 07:40:17.971122 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-618d-account-create-prpvg"] Nov 22 07:40:17 crc kubenswrapper[4929]: W1122 07:40:17.979634 4929 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod57adf361_307d_4f8c_817c_6dcb54a40b4c.slice/crio-036c83eee022158fa778b0aca039c9da677ec712c53b86722be94d68f9bffea7 WatchSource:0}: Error finding container 036c83eee022158fa778b0aca039c9da677ec712c53b86722be94d68f9bffea7: Status 404 returned error can't find the container with id 036c83eee022158fa778b0aca039c9da677ec712c53b86722be94d68f9bffea7 Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.207865 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-5t55t" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.220849 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-b7aa-account-create-6jf49" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.232406 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-e669-account-create-phsc5" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.311313 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49469bee-1b64-40ca-a873-bf08fc285efa-operator-scripts\") pod \"49469bee-1b64-40ca-a873-bf08fc285efa\" (UID: \"49469bee-1b64-40ca-a873-bf08fc285efa\") " Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.311398 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6vwvz\" (UniqueName: \"kubernetes.io/projected/49469bee-1b64-40ca-a873-bf08fc285efa-kube-api-access-6vwvz\") pod \"49469bee-1b64-40ca-a873-bf08fc285efa\" (UID: \"49469bee-1b64-40ca-a873-bf08fc285efa\") " Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.312012 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49469bee-1b64-40ca-a873-bf08fc285efa-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "49469bee-1b64-40ca-a873-bf08fc285efa" (UID: "49469bee-1b64-40ca-a873-bf08fc285efa"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.316063 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49469bee-1b64-40ca-a873-bf08fc285efa-kube-api-access-6vwvz" (OuterVolumeSpecName: "kube-api-access-6vwvz") pod "49469bee-1b64-40ca-a873-bf08fc285efa" (UID: "49469bee-1b64-40ca-a873-bf08fc285efa"). InnerVolumeSpecName "kube-api-access-6vwvz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.414042 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8043acad-43a7-4549-b279-b9545f71283a-operator-scripts\") pod \"8043acad-43a7-4549-b279-b9545f71283a\" (UID: \"8043acad-43a7-4549-b279-b9545f71283a\") " Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.414634 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kc29j\" (UniqueName: \"kubernetes.io/projected/8043acad-43a7-4549-b279-b9545f71283a-kube-api-access-kc29j\") pod \"8043acad-43a7-4549-b279-b9545f71283a\" (UID: \"8043acad-43a7-4549-b279-b9545f71283a\") " Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.414574 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8043acad-43a7-4549-b279-b9545f71283a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8043acad-43a7-4549-b279-b9545f71283a" (UID: "8043acad-43a7-4549-b279-b9545f71283a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.414723 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vb95p\" (UniqueName: \"kubernetes.io/projected/757783e8-e867-426d-94e0-952fd124f60c-kube-api-access-vb95p\") pod \"757783e8-e867-426d-94e0-952fd124f60c\" (UID: \"757783e8-e867-426d-94e0-952fd124f60c\") " Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.415174 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/757783e8-e867-426d-94e0-952fd124f60c-operator-scripts\") pod \"757783e8-e867-426d-94e0-952fd124f60c\" (UID: \"757783e8-e867-426d-94e0-952fd124f60c\") " Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.415782 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/757783e8-e867-426d-94e0-952fd124f60c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "757783e8-e867-426d-94e0-952fd124f60c" (UID: "757783e8-e867-426d-94e0-952fd124f60c"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.416036 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/757783e8-e867-426d-94e0-952fd124f60c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.416055 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/49469bee-1b64-40ca-a873-bf08fc285efa-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.416069 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6vwvz\" (UniqueName: \"kubernetes.io/projected/49469bee-1b64-40ca-a873-bf08fc285efa-kube-api-access-6vwvz\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.416086 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8043acad-43a7-4549-b279-b9545f71283a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.417549 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8043acad-43a7-4549-b279-b9545f71283a-kube-api-access-kc29j" (OuterVolumeSpecName: "kube-api-access-kc29j") pod "8043acad-43a7-4549-b279-b9545f71283a" (UID: "8043acad-43a7-4549-b279-b9545f71283a"). InnerVolumeSpecName "kube-api-access-kc29j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.418712 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/757783e8-e867-426d-94e0-952fd124f60c-kube-api-access-vb95p" (OuterVolumeSpecName: "kube-api-access-vb95p") pod "757783e8-e867-426d-94e0-952fd124f60c" (UID: "757783e8-e867-426d-94e0-952fd124f60c"). InnerVolumeSpecName "kube-api-access-vb95p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.518014 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kc29j\" (UniqueName: \"kubernetes.io/projected/8043acad-43a7-4549-b279-b9545f71283a-kube-api-access-kc29j\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.518087 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vb95p\" (UniqueName: \"kubernetes.io/projected/757783e8-e867-426d-94e0-952fd124f60c-kube-api-access-vb95p\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.819968 4929 generic.go:334] "Generic (PLEG): container finished" podID="2d10c475-c29a-43b7-8607-71679b6109eb" containerID="dd0956e821e6f19d11fb5cbf2c6af0f22aa842ff6dbf890895e4fbf64181588e" exitCode=0 Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.820836 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-0fbc-account-create-xmjgc" event={"ID":"2d10c475-c29a-43b7-8607-71679b6109eb","Type":"ContainerDied","Data":"dd0956e821e6f19d11fb5cbf2c6af0f22aa842ff6dbf890895e4fbf64181588e"} Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.826146 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-5t55t" event={"ID":"49469bee-1b64-40ca-a873-bf08fc285efa","Type":"ContainerDied","Data":"762855af255f861b50f42571d980807060687835349098da288bd6d0ba30dc98"} Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.826204 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="762855af255f861b50f42571d980807060687835349098da288bd6d0ba30dc98" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.826155 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-5t55t" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.828880 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-b7aa-account-create-6jf49" event={"ID":"8043acad-43a7-4549-b279-b9545f71283a","Type":"ContainerDied","Data":"4fdedc856fa5be5cbc2a94d0c04ace8bf88602dae56e355a71393a5a31706502"} Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.828935 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4fdedc856fa5be5cbc2a94d0c04ace8bf88602dae56e355a71393a5a31706502" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.829012 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-b7aa-account-create-6jf49" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.834178 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-618d-account-create-prpvg" event={"ID":"57adf361-307d-4f8c-817c-6dcb54a40b4c","Type":"ContainerStarted","Data":"fafbfdf30281c09cd4e03b4344099c0de6b1547f4c7fb118c23684d14d90e92f"} Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.834259 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-618d-account-create-prpvg" event={"ID":"57adf361-307d-4f8c-817c-6dcb54a40b4c","Type":"ContainerStarted","Data":"036c83eee022158fa778b0aca039c9da677ec712c53b86722be94d68f9bffea7"} Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.838804 4929 generic.go:334] "Generic (PLEG): container finished" podID="989d7793-3fc8-45d9-83dc-30f02a1e7876" containerID="c8831cd02094c059d64e2b0b73727f8a38e750042b399fae2589c9c1ef247f6e" exitCode=0 Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.838910 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-xhj8d" event={"ID":"989d7793-3fc8-45d9-83dc-30f02a1e7876","Type":"ContainerDied","Data":"c8831cd02094c059d64e2b0b73727f8a38e750042b399fae2589c9c1ef247f6e"} Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.843923 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-e669-account-create-phsc5" event={"ID":"757783e8-e867-426d-94e0-952fd124f60c","Type":"ContainerDied","Data":"00704159e69d22d3eb7fbce2858e41aaf500a94e31977e68f2b3976effe8c198"} Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.843981 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="00704159e69d22d3eb7fbce2858e41aaf500a94e31977e68f2b3976effe8c198" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.843984 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-e669-account-create-phsc5" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.860520 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-xh9z8" event={"ID":"65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f","Type":"ContainerStarted","Data":"df53ad4ef7cc65b0c3079a17dc182f1fb4c41565d2152189f330e4cad1788a74"} Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.860559 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-xh9z8" event={"ID":"65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f","Type":"ContainerStarted","Data":"07a2de11580a66a3983a9c151fcde70f4b714562751546a8dbd0a0e91480f258"} Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.904730 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-618d-account-create-prpvg" podStartSLOduration=2.904704513 podStartE2EDuration="2.904704513s" podCreationTimestamp="2025-11-22 07:40:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:40:18.874577918 +0000 UTC m=+1755.984031931" watchObservedRunningTime="2025-11-22 07:40:18.904704513 +0000 UTC m=+1756.014158536" Nov 22 07:40:18 crc kubenswrapper[4929]: I1122 07:40:18.927635 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-xh9z8" podStartSLOduration=2.927616934 podStartE2EDuration="2.927616934s" podCreationTimestamp="2025-11-22 07:40:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:40:18.912758026 +0000 UTC m=+1756.022212039" watchObservedRunningTime="2025-11-22 07:40:18.927616934 +0000 UTC m=+1756.037070967" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.615257 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-wp4fv"] Nov 22 07:40:19 crc kubenswrapper[4929]: E1122 07:40:19.616558 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b4b632b-b258-4862-aeda-4c06647d490f" containerName="mariadb-database-create" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.616624 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b4b632b-b258-4862-aeda-4c06647d490f" containerName="mariadb-database-create" Nov 22 07:40:19 crc kubenswrapper[4929]: E1122 07:40:19.616699 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8043acad-43a7-4549-b279-b9545f71283a" containerName="mariadb-account-create" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.616749 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="8043acad-43a7-4549-b279-b9545f71283a" containerName="mariadb-account-create" Nov 22 07:40:19 crc kubenswrapper[4929]: E1122 07:40:19.616839 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="757783e8-e867-426d-94e0-952fd124f60c" containerName="mariadb-account-create" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.616888 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="757783e8-e867-426d-94e0-952fd124f60c" containerName="mariadb-account-create" Nov 22 07:40:19 crc kubenswrapper[4929]: E1122 07:40:19.616943 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49469bee-1b64-40ca-a873-bf08fc285efa" containerName="mariadb-database-create" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.617006 4929 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="49469bee-1b64-40ca-a873-bf08fc285efa" containerName="mariadb-database-create" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.617208 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="757783e8-e867-426d-94e0-952fd124f60c" containerName="mariadb-account-create" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.617283 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="49469bee-1b64-40ca-a873-bf08fc285efa" containerName="mariadb-database-create" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.617332 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b4b632b-b258-4862-aeda-4c06647d490f" containerName="mariadb-database-create" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.617382 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="8043acad-43a7-4549-b279-b9545f71283a" containerName="mariadb-account-create" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.623190 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-wp4fv" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.624230 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-wp4fv"] Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.638196 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3378093-f835-4a48-9c94-0196360494db-operator-scripts\") pod \"cinder-db-create-wp4fv\" (UID: \"c3378093-f835-4a48-9c94-0196360494db\") " pod="openstack/cinder-db-create-wp4fv" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.638474 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czpqp\" (UniqueName: \"kubernetes.io/projected/c3378093-f835-4a48-9c94-0196360494db-kube-api-access-czpqp\") pod \"cinder-db-create-wp4fv\" (UID: \"c3378093-f835-4a48-9c94-0196360494db\") " pod="openstack/cinder-db-create-wp4fv" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.727023 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-x5n9p"] Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.728414 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-x5n9p" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.740325 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3378093-f835-4a48-9c94-0196360494db-operator-scripts\") pod \"cinder-db-create-wp4fv\" (UID: \"c3378093-f835-4a48-9c94-0196360494db\") " pod="openstack/cinder-db-create-wp4fv" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.740493 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czpqp\" (UniqueName: \"kubernetes.io/projected/c3378093-f835-4a48-9c94-0196360494db-kube-api-access-czpqp\") pod \"cinder-db-create-wp4fv\" (UID: \"c3378093-f835-4a48-9c94-0196360494db\") " pod="openstack/cinder-db-create-wp4fv" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.740699 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zttvt\" (UniqueName: \"kubernetes.io/projected/4f9656a5-c30f-414d-a694-1a34468a3040-kube-api-access-zttvt\") pod \"barbican-db-create-x5n9p\" (UID: \"4f9656a5-c30f-414d-a694-1a34468a3040\") " pod="openstack/barbican-db-create-x5n9p" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.740923 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f9656a5-c30f-414d-a694-1a34468a3040-operator-scripts\") pod \"barbican-db-create-x5n9p\" (UID: \"4f9656a5-c30f-414d-a694-1a34468a3040\") " pod="openstack/barbican-db-create-x5n9p" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.741587 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3378093-f835-4a48-9c94-0196360494db-operator-scripts\") pod \"cinder-db-create-wp4fv\" (UID: \"c3378093-f835-4a48-9c94-0196360494db\") " pod="openstack/cinder-db-create-wp4fv" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.743379 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-245c-account-create-vgxsx"] Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.745077 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-245c-account-create-vgxsx" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.746704 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.755533 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-x5n9p"] Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.770084 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czpqp\" (UniqueName: \"kubernetes.io/projected/c3378093-f835-4a48-9c94-0196360494db-kube-api-access-czpqp\") pod \"cinder-db-create-wp4fv\" (UID: \"c3378093-f835-4a48-9c94-0196360494db\") " pod="openstack/cinder-db-create-wp4fv" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.776414 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-245c-account-create-vgxsx"] Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.837476 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-9x644"] Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.839970 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-9x644" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.842016 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9986f3d9-5c31-45c9-be65-411d993ac709-operator-scripts\") pod \"barbican-245c-account-create-vgxsx\" (UID: \"9986f3d9-5c31-45c9-be65-411d993ac709\") " pod="openstack/barbican-245c-account-create-vgxsx" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.843171 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zttvt\" (UniqueName: \"kubernetes.io/projected/4f9656a5-c30f-414d-a694-1a34468a3040-kube-api-access-zttvt\") pod \"barbican-db-create-x5n9p\" (UID: \"4f9656a5-c30f-414d-a694-1a34468a3040\") " pod="openstack/barbican-db-create-x5n9p" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.843231 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzkqn\" (UniqueName: \"kubernetes.io/projected/5a74e4b0-12f9-4d8f-a2b1-d8145c286d89-kube-api-access-gzkqn\") pod \"neutron-db-create-9x644\" (UID: \"5a74e4b0-12f9-4d8f-a2b1-d8145c286d89\") " pod="openstack/neutron-db-create-9x644" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.843291 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f9656a5-c30f-414d-a694-1a34468a3040-operator-scripts\") pod \"barbican-db-create-x5n9p\" (UID: \"4f9656a5-c30f-414d-a694-1a34468a3040\") " pod="openstack/barbican-db-create-x5n9p" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.843327 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a74e4b0-12f9-4d8f-a2b1-d8145c286d89-operator-scripts\") pod \"neutron-db-create-9x644\" (UID: \"5a74e4b0-12f9-4d8f-a2b1-d8145c286d89\") " pod="openstack/neutron-db-create-9x644" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.843356 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blq2j\" (UniqueName: \"kubernetes.io/projected/9986f3d9-5c31-45c9-be65-411d993ac709-kube-api-access-blq2j\") pod \"barbican-245c-account-create-vgxsx\" (UID: \"9986f3d9-5c31-45c9-be65-411d993ac709\") " pod="openstack/barbican-245c-account-create-vgxsx" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.843984 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f9656a5-c30f-414d-a694-1a34468a3040-operator-scripts\") pod \"barbican-db-create-x5n9p\" (UID: \"4f9656a5-c30f-414d-a694-1a34468a3040\") " pod="openstack/barbican-db-create-x5n9p" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.845130 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-cbf9-account-create-6dk52"] Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.846831 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-cbf9-account-create-6dk52" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.848442 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.852292 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-9x644"] Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.858888 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-cbf9-account-create-6dk52"] Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.861881 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zttvt\" (UniqueName: \"kubernetes.io/projected/4f9656a5-c30f-414d-a694-1a34468a3040-kube-api-access-zttvt\") pod \"barbican-db-create-x5n9p\" (UID: \"4f9656a5-c30f-414d-a694-1a34468a3040\") " pod="openstack/barbican-db-create-x5n9p" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.881607 4929 generic.go:334] "Generic (PLEG): container finished" podID="57adf361-307d-4f8c-817c-6dcb54a40b4c" containerID="fafbfdf30281c09cd4e03b4344099c0de6b1547f4c7fb118c23684d14d90e92f" exitCode=0 Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.881663 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-618d-account-create-prpvg" event={"ID":"57adf361-307d-4f8c-817c-6dcb54a40b4c","Type":"ContainerDied","Data":"fafbfdf30281c09cd4e03b4344099c0de6b1547f4c7fb118c23684d14d90e92f"} Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.884007 4929 generic.go:334] "Generic (PLEG): container finished" podID="65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f" containerID="df53ad4ef7cc65b0c3079a17dc182f1fb4c41565d2152189f330e4cad1788a74" exitCode=0 Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.884168 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-xh9z8" event={"ID":"65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f","Type":"ContainerDied","Data":"df53ad4ef7cc65b0c3079a17dc182f1fb4c41565d2152189f330e4cad1788a74"} Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.944719 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gl552\" (UniqueName: \"kubernetes.io/projected/27746c3a-116c-4c6d-9697-9f44777a65be-kube-api-access-gl552\") pod \"cinder-cbf9-account-create-6dk52\" (UID: \"27746c3a-116c-4c6d-9697-9f44777a65be\") " pod="openstack/cinder-cbf9-account-create-6dk52" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.945422 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9986f3d9-5c31-45c9-be65-411d993ac709-operator-scripts\") pod \"barbican-245c-account-create-vgxsx\" (UID: \"9986f3d9-5c31-45c9-be65-411d993ac709\") " pod="openstack/barbican-245c-account-create-vgxsx" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.945597 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzkqn\" (UniqueName: \"kubernetes.io/projected/5a74e4b0-12f9-4d8f-a2b1-d8145c286d89-kube-api-access-gzkqn\") pod \"neutron-db-create-9x644\" (UID: \"5a74e4b0-12f9-4d8f-a2b1-d8145c286d89\") " pod="openstack/neutron-db-create-9x644" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.945661 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/27746c3a-116c-4c6d-9697-9f44777a65be-operator-scripts\") pod \"cinder-cbf9-account-create-6dk52\" (UID: \"27746c3a-116c-4c6d-9697-9f44777a65be\") " pod="openstack/cinder-cbf9-account-create-6dk52" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.945804 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a74e4b0-12f9-4d8f-a2b1-d8145c286d89-operator-scripts\") pod \"neutron-db-create-9x644\" (UID: \"5a74e4b0-12f9-4d8f-a2b1-d8145c286d89\") " pod="openstack/neutron-db-create-9x644" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.945941 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9986f3d9-5c31-45c9-be65-411d993ac709-operator-scripts\") pod \"barbican-245c-account-create-vgxsx\" (UID: \"9986f3d9-5c31-45c9-be65-411d993ac709\") " pod="openstack/barbican-245c-account-create-vgxsx" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.946291 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blq2j\" (UniqueName: \"kubernetes.io/projected/9986f3d9-5c31-45c9-be65-411d993ac709-kube-api-access-blq2j\") pod \"barbican-245c-account-create-vgxsx\" (UID: \"9986f3d9-5c31-45c9-be65-411d993ac709\") " pod="openstack/barbican-245c-account-create-vgxsx" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.947156 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-wp4fv" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.947240 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a74e4b0-12f9-4d8f-a2b1-d8145c286d89-operator-scripts\") pod \"neutron-db-create-9x644\" (UID: \"5a74e4b0-12f9-4d8f-a2b1-d8145c286d89\") " pod="openstack/neutron-db-create-9x644" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.961852 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blq2j\" (UniqueName: \"kubernetes.io/projected/9986f3d9-5c31-45c9-be65-411d993ac709-kube-api-access-blq2j\") pod \"barbican-245c-account-create-vgxsx\" (UID: \"9986f3d9-5c31-45c9-be65-411d993ac709\") " pod="openstack/barbican-245c-account-create-vgxsx" Nov 22 07:40:19 crc kubenswrapper[4929]: I1122 07:40:19.964175 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzkqn\" (UniqueName: \"kubernetes.io/projected/5a74e4b0-12f9-4d8f-a2b1-d8145c286d89-kube-api-access-gzkqn\") pod \"neutron-db-create-9x644\" (UID: \"5a74e4b0-12f9-4d8f-a2b1-d8145c286d89\") " pod="openstack/neutron-db-create-9x644" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.029228 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-01c3-account-create-s4ms5"] Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.030281 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-01c3-account-create-s4ms5" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.032659 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.040417 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-01c3-account-create-s4ms5"] Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.048758 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gl552\" (UniqueName: \"kubernetes.io/projected/27746c3a-116c-4c6d-9697-9f44777a65be-kube-api-access-gl552\") pod \"cinder-cbf9-account-create-6dk52\" (UID: \"27746c3a-116c-4c6d-9697-9f44777a65be\") " pod="openstack/cinder-cbf9-account-create-6dk52" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.049904 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27746c3a-116c-4c6d-9697-9f44777a65be-operator-scripts\") pod \"cinder-cbf9-account-create-6dk52\" (UID: \"27746c3a-116c-4c6d-9697-9f44777a65be\") " pod="openstack/cinder-cbf9-account-create-6dk52" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.052507 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27746c3a-116c-4c6d-9697-9f44777a65be-operator-scripts\") pod \"cinder-cbf9-account-create-6dk52\" (UID: \"27746c3a-116c-4c6d-9697-9f44777a65be\") " pod="openstack/cinder-cbf9-account-create-6dk52" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.051348 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-x5n9p" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.053489 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed5c1073-5bff-4d40-82fe-014200e5d8ca-operator-scripts\") pod \"neutron-01c3-account-create-s4ms5\" (UID: \"ed5c1073-5bff-4d40-82fe-014200e5d8ca\") " pod="openstack/neutron-01c3-account-create-s4ms5" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.053571 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mn2h9\" (UniqueName: \"kubernetes.io/projected/ed5c1073-5bff-4d40-82fe-014200e5d8ca-kube-api-access-mn2h9\") pod \"neutron-01c3-account-create-s4ms5\" (UID: \"ed5c1073-5bff-4d40-82fe-014200e5d8ca\") " pod="openstack/neutron-01c3-account-create-s4ms5" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.068499 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gl552\" (UniqueName: \"kubernetes.io/projected/27746c3a-116c-4c6d-9697-9f44777a65be-kube-api-access-gl552\") pod \"cinder-cbf9-account-create-6dk52\" (UID: \"27746c3a-116c-4c6d-9697-9f44777a65be\") " pod="openstack/cinder-cbf9-account-create-6dk52" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.123654 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-245c-account-create-vgxsx" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.154918 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed5c1073-5bff-4d40-82fe-014200e5d8ca-operator-scripts\") pod \"neutron-01c3-account-create-s4ms5\" (UID: \"ed5c1073-5bff-4d40-82fe-014200e5d8ca\") " pod="openstack/neutron-01c3-account-create-s4ms5" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.154960 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mn2h9\" (UniqueName: \"kubernetes.io/projected/ed5c1073-5bff-4d40-82fe-014200e5d8ca-kube-api-access-mn2h9\") pod \"neutron-01c3-account-create-s4ms5\" (UID: \"ed5c1073-5bff-4d40-82fe-014200e5d8ca\") " pod="openstack/neutron-01c3-account-create-s4ms5" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.156326 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed5c1073-5bff-4d40-82fe-014200e5d8ca-operator-scripts\") pod \"neutron-01c3-account-create-s4ms5\" (UID: \"ed5c1073-5bff-4d40-82fe-014200e5d8ca\") " pod="openstack/neutron-01c3-account-create-s4ms5" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.162609 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-9x644" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.172959 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mn2h9\" (UniqueName: \"kubernetes.io/projected/ed5c1073-5bff-4d40-82fe-014200e5d8ca-kube-api-access-mn2h9\") pod \"neutron-01c3-account-create-s4ms5\" (UID: \"ed5c1073-5bff-4d40-82fe-014200e5d8ca\") " pod="openstack/neutron-01c3-account-create-s4ms5" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.230621 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-cbf9-account-create-6dk52" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.444239 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-01c3-account-create-s4ms5" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.455867 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-wp4fv"] Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.488824 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-0fbc-account-create-xmjgc" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.492992 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-create-xhj8d" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.546958 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-x5n9p"] Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.566283 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjr52\" (UniqueName: \"kubernetes.io/projected/989d7793-3fc8-45d9-83dc-30f02a1e7876-kube-api-access-hjr52\") pod \"989d7793-3fc8-45d9-83dc-30f02a1e7876\" (UID: \"989d7793-3fc8-45d9-83dc-30f02a1e7876\") " Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.566385 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/989d7793-3fc8-45d9-83dc-30f02a1e7876-operator-scripts\") pod \"989d7793-3fc8-45d9-83dc-30f02a1e7876\" (UID: \"989d7793-3fc8-45d9-83dc-30f02a1e7876\") " Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.566610 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d10c475-c29a-43b7-8607-71679b6109eb-operator-scripts\") pod \"2d10c475-c29a-43b7-8607-71679b6109eb\" (UID: \"2d10c475-c29a-43b7-8607-71679b6109eb\") " Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.566805 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r9fwt\" (UniqueName: \"kubernetes.io/projected/2d10c475-c29a-43b7-8607-71679b6109eb-kube-api-access-r9fwt\") pod \"2d10c475-c29a-43b7-8607-71679b6109eb\" (UID: \"2d10c475-c29a-43b7-8607-71679b6109eb\") " Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.567768 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d10c475-c29a-43b7-8607-71679b6109eb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2d10c475-c29a-43b7-8607-71679b6109eb" (UID: "2d10c475-c29a-43b7-8607-71679b6109eb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.567766 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/989d7793-3fc8-45d9-83dc-30f02a1e7876-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "989d7793-3fc8-45d9-83dc-30f02a1e7876" (UID: "989d7793-3fc8-45d9-83dc-30f02a1e7876"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.569504 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/989d7793-3fc8-45d9-83dc-30f02a1e7876-kube-api-access-hjr52" (OuterVolumeSpecName: "kube-api-access-hjr52") pod "989d7793-3fc8-45d9-83dc-30f02a1e7876" (UID: "989d7793-3fc8-45d9-83dc-30f02a1e7876"). InnerVolumeSpecName "kube-api-access-hjr52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.570784 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d10c475-c29a-43b7-8607-71679b6109eb-kube-api-access-r9fwt" (OuterVolumeSpecName: "kube-api-access-r9fwt") pod "2d10c475-c29a-43b7-8607-71679b6109eb" (UID: "2d10c475-c29a-43b7-8607-71679b6109eb"). InnerVolumeSpecName "kube-api-access-r9fwt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.668906 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r9fwt\" (UniqueName: \"kubernetes.io/projected/2d10c475-c29a-43b7-8607-71679b6109eb-kube-api-access-r9fwt\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.669152 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjr52\" (UniqueName: \"kubernetes.io/projected/989d7793-3fc8-45d9-83dc-30f02a1e7876-kube-api-access-hjr52\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.669161 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/989d7793-3fc8-45d9-83dc-30f02a1e7876-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.669170 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d10c475-c29a-43b7-8607-71679b6109eb-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.895033 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-xhj8d" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.895024 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-xhj8d" event={"ID":"989d7793-3fc8-45d9-83dc-30f02a1e7876","Type":"ContainerDied","Data":"d0dc6734d7f1acdf87f79f3826f6531af74c0627c526f0bac5fe00060682a43a"} Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.895167 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d0dc6734d7f1acdf87f79f3826f6531af74c0627c526f0bac5fe00060682a43a" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.896116 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-wp4fv" event={"ID":"c3378093-f835-4a48-9c94-0196360494db","Type":"ContainerStarted","Data":"cca17e817096a325c434639eb38d95309de9ebfd09c7a5233b0d99359d6a842f"} Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.897297 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-0fbc-account-create-xmjgc" event={"ID":"2d10c475-c29a-43b7-8607-71679b6109eb","Type":"ContainerDied","Data":"a9ce14c65a07dbd7a4693a5bf54ca0e3df997e811e821978c28c02fc97c6eca0"} Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.897326 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a9ce14c65a07dbd7a4693a5bf54ca0e3df997e811e821978c28c02fc97c6eca0" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.897353 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-0fbc-account-create-xmjgc" Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.897956 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-x5n9p" event={"ID":"4f9656a5-c30f-414d-a694-1a34468a3040","Type":"ContainerStarted","Data":"bbbbbc0cffba9969a2424a60ae574e5c72fee09e72f2b987c553c2487989c9a5"} Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.899024 4929 generic.go:334] "Generic (PLEG): container finished" podID="fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4" containerID="99d1638126f7d8c3f4efe9ff5b703fffdccd0e155db3d9aede5e34e096262216" exitCode=0 Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.899200 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4","Type":"ContainerDied","Data":"99d1638126f7d8c3f4efe9ff5b703fffdccd0e155db3d9aede5e34e096262216"} Nov 22 07:40:20 crc kubenswrapper[4929]: W1122 07:40:20.951342 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9986f3d9_5c31_45c9_be65_411d993ac709.slice/crio-43c294fe6e20164bc088ff41c68d4c68f42df57e11b4cc7b73f294f3952f39d6 WatchSource:0}: Error finding container 43c294fe6e20164bc088ff41c68d4c68f42df57e11b4cc7b73f294f3952f39d6: Status 404 returned error can't find the container with id 43c294fe6e20164bc088ff41c68d4c68f42df57e11b4cc7b73f294f3952f39d6 Nov 22 07:40:20 crc kubenswrapper[4929]: I1122 07:40:20.952142 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-245c-account-create-vgxsx"] Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.040420 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-01c3-account-create-s4ms5"] Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.066345 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-cbf9-account-create-6dk52"] Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.072019 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-9x644"] Nov 22 07:40:21 crc kubenswrapper[4929]: W1122 07:40:21.092513 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27746c3a_116c_4c6d_9697_9f44777a65be.slice/crio-a7c6402cf8e0e1d96e83da27b106c419417a291c3b953a4b5f5fded8e3a4640d WatchSource:0}: Error finding container a7c6402cf8e0e1d96e83da27b106c419417a291c3b953a4b5f5fded8e3a4640d: Status 404 returned error can't find the container with id a7c6402cf8e0e1d96e83da27b106c419417a291c3b953a4b5f5fded8e3a4640d Nov 22 07:40:21 crc kubenswrapper[4929]: W1122 07:40:21.112693 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poded5c1073_5bff_4d40_82fe_014200e5d8ca.slice/crio-d8f78668db902e0514fa7da7ac3f7090d0e26f1d6c20b724ab1f45c7f9e48e94 WatchSource:0}: Error finding container d8f78668db902e0514fa7da7ac3f7090d0e26f1d6c20b724ab1f45c7f9e48e94: Status 404 returned error can't find the container with id d8f78668db902e0514fa7da7ac3f7090d0e26f1d6c20b724ab1f45c7f9e48e94 Nov 22 07:40:21 crc kubenswrapper[4929]: E1122 07:40:21.246864 4929 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2d10c475_c29a_43b7_8607_71679b6109eb.slice/crio-a9ce14c65a07dbd7a4693a5bf54ca0e3df997e811e821978c28c02fc97c6eca0\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod989d7793_3fc8_45d9_83dc_30f02a1e7876.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod989d7793_3fc8_45d9_83dc_30f02a1e7876.slice/crio-d0dc6734d7f1acdf87f79f3826f6531af74c0627c526f0bac5fe00060682a43a\": RecentStats: unable to find data in memory cache]" Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.261666 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-618d-account-create-prpvg" Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.332536 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-xh9z8" Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.389646 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-55rmr\" (UniqueName: \"kubernetes.io/projected/57adf361-307d-4f8c-817c-6dcb54a40b4c-kube-api-access-55rmr\") pod \"57adf361-307d-4f8c-817c-6dcb54a40b4c\" (UID: \"57adf361-307d-4f8c-817c-6dcb54a40b4c\") " Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.389709 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57adf361-307d-4f8c-817c-6dcb54a40b4c-operator-scripts\") pod \"57adf361-307d-4f8c-817c-6dcb54a40b4c\" (UID: \"57adf361-307d-4f8c-817c-6dcb54a40b4c\") " Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.390706 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/57adf361-307d-4f8c-817c-6dcb54a40b4c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "57adf361-307d-4f8c-817c-6dcb54a40b4c" (UID: "57adf361-307d-4f8c-817c-6dcb54a40b4c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.433717 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57adf361-307d-4f8c-817c-6dcb54a40b4c-kube-api-access-55rmr" (OuterVolumeSpecName: "kube-api-access-55rmr") pod "57adf361-307d-4f8c-817c-6dcb54a40b4c" (UID: "57adf361-307d-4f8c-817c-6dcb54a40b4c"). InnerVolumeSpecName "kube-api-access-55rmr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.490882 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jxbq\" (UniqueName: \"kubernetes.io/projected/65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f-kube-api-access-4jxbq\") pod \"65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f\" (UID: \"65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f\") " Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.491093 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f-operator-scripts\") pod \"65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f\" (UID: \"65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f\") " Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.491462 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-55rmr\" (UniqueName: \"kubernetes.io/projected/57adf361-307d-4f8c-817c-6dcb54a40b4c-kube-api-access-55rmr\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.491479 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/57adf361-307d-4f8c-817c-6dcb54a40b4c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.491797 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f" (UID: "65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.514068 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f-kube-api-access-4jxbq" (OuterVolumeSpecName: "kube-api-access-4jxbq") pod "65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f" (UID: "65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f"). InnerVolumeSpecName "kube-api-access-4jxbq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.595305 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.595427 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jxbq\" (UniqueName: \"kubernetes.io/projected/65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f-kube-api-access-4jxbq\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.934746 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-618d-account-create-prpvg" event={"ID":"57adf361-307d-4f8c-817c-6dcb54a40b4c","Type":"ContainerDied","Data":"036c83eee022158fa778b0aca039c9da677ec712c53b86722be94d68f9bffea7"} Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.935167 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="036c83eee022158fa778b0aca039c9da677ec712c53b86722be94d68f9bffea7" Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.936847 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-618d-account-create-prpvg" Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.961030 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-xh9z8" Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.967655 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-9x644" event={"ID":"5a74e4b0-12f9-4d8f-a2b1-d8145c286d89","Type":"ContainerStarted","Data":"20d7a164d4131a7c2af37b0ba762dc70aa5134a242b1e045e82d3bdcf12fa0e3"} Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.967703 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-xh9z8" event={"ID":"65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f","Type":"ContainerDied","Data":"07a2de11580a66a3983a9c151fcde70f4b714562751546a8dbd0a0e91480f258"} Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.967724 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="07a2de11580a66a3983a9c151fcde70f4b714562751546a8dbd0a0e91480f258" Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.972599 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-01c3-account-create-s4ms5" event={"ID":"ed5c1073-5bff-4d40-82fe-014200e5d8ca","Type":"ContainerStarted","Data":"d8f78668db902e0514fa7da7ac3f7090d0e26f1d6c20b724ab1f45c7f9e48e94"} Nov 22 07:40:21 crc kubenswrapper[4929]: I1122 07:40:21.981585 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-wp4fv" event={"ID":"c3378093-f835-4a48-9c94-0196360494db","Type":"ContainerStarted","Data":"250f1418d2c1d6ef793feadac1b10f27be8417e419bf9b938cf1829f18ad9a11"} Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.011961 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-x5n9p" event={"ID":"4f9656a5-c30f-414d-a694-1a34468a3040","Type":"ContainerStarted","Data":"4af3f6661afeac3bd634a380552e2a22b3c963c40c8d2721376c5cf67ea2ad45"} Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.020657 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-wp4fv" podStartSLOduration=3.020632586 podStartE2EDuration="3.020632586s" podCreationTimestamp="2025-11-22 07:40:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:40:21.996328593 +0000 UTC m=+1759.105782606" watchObservedRunningTime="2025-11-22 07:40:22.020632586 +0000 UTC m=+1759.130086599" Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.020954 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-245c-account-create-vgxsx" event={"ID":"9986f3d9-5c31-45c9-be65-411d993ac709","Type":"ContainerStarted","Data":"8d8e41a5e003e60b99e57a4fe2d097fe40985da0cc3ce4066a0b88a86e0ff1ea"} Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.020988 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-245c-account-create-vgxsx" event={"ID":"9986f3d9-5c31-45c9-be65-411d993ac709","Type":"ContainerStarted","Data":"43c294fe6e20164bc088ff41c68d4c68f42df57e11b4cc7b73f294f3952f39d6"} Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.024072 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-cbf9-account-create-6dk52" 
event={"ID":"27746c3a-116c-4c6d-9697-9f44777a65be","Type":"ContainerStarted","Data":"a7c6402cf8e0e1d96e83da27b106c419417a291c3b953a4b5f5fded8e3a4640d"} Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.038609 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-x5n9p" podStartSLOduration=3.038586064 podStartE2EDuration="3.038586064s" podCreationTimestamp="2025-11-22 07:40:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:40:22.031909192 +0000 UTC m=+1759.141363205" watchObservedRunningTime="2025-11-22 07:40:22.038586064 +0000 UTC m=+1759.148040077" Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.052510 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-245c-account-create-vgxsx" podStartSLOduration=3.05248952 podStartE2EDuration="3.05248952s" podCreationTimestamp="2025-11-22 07:40:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:40:22.047336943 +0000 UTC m=+1759.156790966" watchObservedRunningTime="2025-11-22 07:40:22.05248952 +0000 UTC m=+1759.161943543" Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.083660 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-7tkl7"] Nov 22 07:40:22 crc kubenswrapper[4929]: E1122 07:40:22.084127 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57adf361-307d-4f8c-817c-6dcb54a40b4c" containerName="mariadb-account-create" Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.084151 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="57adf361-307d-4f8c-817c-6dcb54a40b4c" containerName="mariadb-account-create" Nov 22 07:40:22 crc kubenswrapper[4929]: E1122 07:40:22.084175 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d10c475-c29a-43b7-8607-71679b6109eb" containerName="mariadb-account-create" Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.084182 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d10c475-c29a-43b7-8607-71679b6109eb" containerName="mariadb-account-create" Nov 22 07:40:22 crc kubenswrapper[4929]: E1122 07:40:22.084195 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="989d7793-3fc8-45d9-83dc-30f02a1e7876" containerName="mariadb-database-create" Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.084203 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="989d7793-3fc8-45d9-83dc-30f02a1e7876" containerName="mariadb-database-create" Nov 22 07:40:22 crc kubenswrapper[4929]: E1122 07:40:22.085384 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f" containerName="mariadb-database-create" Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.085421 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f" containerName="mariadb-database-create" Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.085635 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d10c475-c29a-43b7-8607-71679b6109eb" containerName="mariadb-account-create" Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.085650 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="989d7793-3fc8-45d9-83dc-30f02a1e7876" containerName="mariadb-database-create" Nov 22 07:40:22 crc 
Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.085676 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="57adf361-307d-4f8c-817c-6dcb54a40b4c" containerName="mariadb-account-create"
Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.086605 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-7tkl7"
Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.092974 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.093133 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.104962 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-jg2tk"
Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.105342 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.133877 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-7tkl7"]
Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.206255 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f311300e-5d96-4731-99a1-1e072280db75-config-data\") pod \"keystone-db-sync-7tkl7\" (UID: \"f311300e-5d96-4731-99a1-1e072280db75\") " pod="openstack/keystone-db-sync-7tkl7"
Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.206350 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-99n9b\" (UniqueName: \"kubernetes.io/projected/f311300e-5d96-4731-99a1-1e072280db75-kube-api-access-99n9b\") pod \"keystone-db-sync-7tkl7\" (UID: \"f311300e-5d96-4731-99a1-1e072280db75\") " pod="openstack/keystone-db-sync-7tkl7"
Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.206374 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f311300e-5d96-4731-99a1-1e072280db75-combined-ca-bundle\") pod \"keystone-db-sync-7tkl7\" (UID: \"f311300e-5d96-4731-99a1-1e072280db75\") " pod="openstack/keystone-db-sync-7tkl7"
Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.307828 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f311300e-5d96-4731-99a1-1e072280db75-config-data\") pod \"keystone-db-sync-7tkl7\" (UID: \"f311300e-5d96-4731-99a1-1e072280db75\") " pod="openstack/keystone-db-sync-7tkl7"
Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.307901 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-99n9b\" (UniqueName: \"kubernetes.io/projected/f311300e-5d96-4731-99a1-1e072280db75-kube-api-access-99n9b\") pod \"keystone-db-sync-7tkl7\" (UID: \"f311300e-5d96-4731-99a1-1e072280db75\") " pod="openstack/keystone-db-sync-7tkl7"
Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.307921 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f311300e-5d96-4731-99a1-1e072280db75-combined-ca-bundle\") pod \"keystone-db-sync-7tkl7\" (UID: \"f311300e-5d96-4731-99a1-1e072280db75\") " pod="openstack/keystone-db-sync-7tkl7"
\"kubernetes.io/secret/f311300e-5d96-4731-99a1-1e072280db75-combined-ca-bundle\") pod \"keystone-db-sync-7tkl7\" (UID: \"f311300e-5d96-4731-99a1-1e072280db75\") " pod="openstack/keystone-db-sync-7tkl7" Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.320957 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f311300e-5d96-4731-99a1-1e072280db75-combined-ca-bundle\") pod \"keystone-db-sync-7tkl7\" (UID: \"f311300e-5d96-4731-99a1-1e072280db75\") " pod="openstack/keystone-db-sync-7tkl7" Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.321051 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f311300e-5d96-4731-99a1-1e072280db75-config-data\") pod \"keystone-db-sync-7tkl7\" (UID: \"f311300e-5d96-4731-99a1-1e072280db75\") " pod="openstack/keystone-db-sync-7tkl7" Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.342714 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-99n9b\" (UniqueName: \"kubernetes.io/projected/f311300e-5d96-4731-99a1-1e072280db75-kube-api-access-99n9b\") pod \"keystone-db-sync-7tkl7\" (UID: \"f311300e-5d96-4731-99a1-1e072280db75\") " pod="openstack/keystone-db-sync-7tkl7" Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.544995 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-7tkl7" Nov 22 07:40:22 crc kubenswrapper[4929]: I1122 07:40:22.817873 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-7tkl7"] Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.033332 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-01c3-account-create-s4ms5" event={"ID":"ed5c1073-5bff-4d40-82fe-014200e5d8ca","Type":"ContainerStarted","Data":"c8153cd03e5828456a0438210663b0626a9287f391dd96f1367150607ed8e9ca"} Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.035197 4929 generic.go:334] "Generic (PLEG): container finished" podID="c3378093-f835-4a48-9c94-0196360494db" containerID="250f1418d2c1d6ef793feadac1b10f27be8417e419bf9b938cf1829f18ad9a11" exitCode=0 Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.035253 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-wp4fv" event={"ID":"c3378093-f835-4a48-9c94-0196360494db","Type":"ContainerDied","Data":"250f1418d2c1d6ef793feadac1b10f27be8417e419bf9b938cf1829f18ad9a11"} Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.036839 4929 generic.go:334] "Generic (PLEG): container finished" podID="4f9656a5-c30f-414d-a694-1a34468a3040" containerID="4af3f6661afeac3bd634a380552e2a22b3c963c40c8d2721376c5cf67ea2ad45" exitCode=0 Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.036902 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-x5n9p" event={"ID":"4f9656a5-c30f-414d-a694-1a34468a3040","Type":"ContainerDied","Data":"4af3f6661afeac3bd634a380552e2a22b3c963c40c8d2721376c5cf67ea2ad45"} Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.038545 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-cbf9-account-create-6dk52" event={"ID":"27746c3a-116c-4c6d-9697-9f44777a65be","Type":"ContainerStarted","Data":"78fd18e6de2c93f1ef21e8e3fbe08d40ffb8d363a02ef32525a8195ddc90ec18"} Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.043079 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/prometheus-metric-storage-0" event={"ID":"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4","Type":"ContainerStarted","Data":"2b83fe9be535644bd214c6a08b751440b755d48081f652cffeb7dfb4853455c2"} Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.046977 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-7tkl7" event={"ID":"f311300e-5d96-4731-99a1-1e072280db75","Type":"ContainerStarted","Data":"14c3f081825b0d1688754194974124560d9efac173d14e974a310dab3676cb67"} Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.058425 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-01c3-account-create-s4ms5" podStartSLOduration=3.058404894 podStartE2EDuration="3.058404894s" podCreationTimestamp="2025-11-22 07:40:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:40:23.049707616 +0000 UTC m=+1760.159161629" watchObservedRunningTime="2025-11-22 07:40:23.058404894 +0000 UTC m=+1760.167858907" Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.060397 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-9x644" event={"ID":"5a74e4b0-12f9-4d8f-a2b1-d8145c286d89","Type":"ContainerStarted","Data":"6d780ed71d07b9618e7e34ad3770d037b80d4c84018063b1f0c5fdb8d3740ef8"} Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.075812 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-cbf9-account-create-6dk52" podStartSLOduration=4.07579292 podStartE2EDuration="4.07579292s" podCreationTimestamp="2025-11-22 07:40:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:40:23.067577833 +0000 UTC m=+1760.177031846" watchObservedRunningTime="2025-11-22 07:40:23.07579292 +0000 UTC m=+1760.185246933" Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.110986 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-9x644" podStartSLOduration=4.110964419 podStartE2EDuration="4.110964419s" podCreationTimestamp="2025-11-22 07:40:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:40:23.104965693 +0000 UTC m=+1760.214419706" watchObservedRunningTime="2025-11-22 07:40:23.110964419 +0000 UTC m=+1760.220418422" Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.841131 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-sync-qnmbh"] Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.842813 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-sync-qnmbh" Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.851604 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-wlm2p" Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.851818 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-config-data" Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.852648 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-qnmbh"] Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.953364 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ee12ebde-6711-471d-b83c-649a0523ce63-db-sync-config-data\") pod \"watcher-db-sync-qnmbh\" (UID: \"ee12ebde-6711-471d-b83c-649a0523ce63\") " pod="openstack/watcher-db-sync-qnmbh" Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.953421 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee12ebde-6711-471d-b83c-649a0523ce63-config-data\") pod \"watcher-db-sync-qnmbh\" (UID: \"ee12ebde-6711-471d-b83c-649a0523ce63\") " pod="openstack/watcher-db-sync-qnmbh" Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.953444 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee12ebde-6711-471d-b83c-649a0523ce63-combined-ca-bundle\") pod \"watcher-db-sync-qnmbh\" (UID: \"ee12ebde-6711-471d-b83c-649a0523ce63\") " pod="openstack/watcher-db-sync-qnmbh" Nov 22 07:40:23 crc kubenswrapper[4929]: I1122 07:40:23.953574 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nzrp\" (UniqueName: \"kubernetes.io/projected/ee12ebde-6711-471d-b83c-649a0523ce63-kube-api-access-2nzrp\") pod \"watcher-db-sync-qnmbh\" (UID: \"ee12ebde-6711-471d-b83c-649a0523ce63\") " pod="openstack/watcher-db-sync-qnmbh" Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.055386 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2nzrp\" (UniqueName: \"kubernetes.io/projected/ee12ebde-6711-471d-b83c-649a0523ce63-kube-api-access-2nzrp\") pod \"watcher-db-sync-qnmbh\" (UID: \"ee12ebde-6711-471d-b83c-649a0523ce63\") " pod="openstack/watcher-db-sync-qnmbh" Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.055871 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ee12ebde-6711-471d-b83c-649a0523ce63-db-sync-config-data\") pod \"watcher-db-sync-qnmbh\" (UID: \"ee12ebde-6711-471d-b83c-649a0523ce63\") " pod="openstack/watcher-db-sync-qnmbh" Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.056914 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee12ebde-6711-471d-b83c-649a0523ce63-config-data\") pod \"watcher-db-sync-qnmbh\" (UID: \"ee12ebde-6711-471d-b83c-649a0523ce63\") " pod="openstack/watcher-db-sync-qnmbh" Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.056965 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee12ebde-6711-471d-b83c-649a0523ce63-combined-ca-bundle\") 
pod \"watcher-db-sync-qnmbh\" (UID: \"ee12ebde-6711-471d-b83c-649a0523ce63\") " pod="openstack/watcher-db-sync-qnmbh" Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.061961 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ee12ebde-6711-471d-b83c-649a0523ce63-db-sync-config-data\") pod \"watcher-db-sync-qnmbh\" (UID: \"ee12ebde-6711-471d-b83c-649a0523ce63\") " pod="openstack/watcher-db-sync-qnmbh" Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.063867 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee12ebde-6711-471d-b83c-649a0523ce63-combined-ca-bundle\") pod \"watcher-db-sync-qnmbh\" (UID: \"ee12ebde-6711-471d-b83c-649a0523ce63\") " pod="openstack/watcher-db-sync-qnmbh" Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.063868 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee12ebde-6711-471d-b83c-649a0523ce63-config-data\") pod \"watcher-db-sync-qnmbh\" (UID: \"ee12ebde-6711-471d-b83c-649a0523ce63\") " pod="openstack/watcher-db-sync-qnmbh" Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.075895 4929 generic.go:334] "Generic (PLEG): container finished" podID="9986f3d9-5c31-45c9-be65-411d993ac709" containerID="8d8e41a5e003e60b99e57a4fe2d097fe40985da0cc3ce4066a0b88a86e0ff1ea" exitCode=0 Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.075954 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-245c-account-create-vgxsx" event={"ID":"9986f3d9-5c31-45c9-be65-411d993ac709","Type":"ContainerDied","Data":"8d8e41a5e003e60b99e57a4fe2d097fe40985da0cc3ce4066a0b88a86e0ff1ea"} Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.078685 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nzrp\" (UniqueName: \"kubernetes.io/projected/ee12ebde-6711-471d-b83c-649a0523ce63-kube-api-access-2nzrp\") pod \"watcher-db-sync-qnmbh\" (UID: \"ee12ebde-6711-471d-b83c-649a0523ce63\") " pod="openstack/watcher-db-sync-qnmbh" Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.215225 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-qnmbh" Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.317645 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zv95t" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" probeResult="failure" output=< Nov 22 07:40:24 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 07:40:24 crc kubenswrapper[4929]: > Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.695314 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-wp4fv" Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.702009 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-x5n9p" Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.757224 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-qnmbh"] Nov 22 07:40:24 crc kubenswrapper[4929]: W1122 07:40:24.758296 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee12ebde_6711_471d_b83c_649a0523ce63.slice/crio-275a4d7fceba7baafdfc0d8e4ffe6a2579a4f981af864993334eb94ba5956b93 WatchSource:0}: Error finding container 275a4d7fceba7baafdfc0d8e4ffe6a2579a4f981af864993334eb94ba5956b93: Status 404 returned error can't find the container with id 275a4d7fceba7baafdfc0d8e4ffe6a2579a4f981af864993334eb94ba5956b93 Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.870612 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-czpqp\" (UniqueName: \"kubernetes.io/projected/c3378093-f835-4a48-9c94-0196360494db-kube-api-access-czpqp\") pod \"c3378093-f835-4a48-9c94-0196360494db\" (UID: \"c3378093-f835-4a48-9c94-0196360494db\") " Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.870795 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zttvt\" (UniqueName: \"kubernetes.io/projected/4f9656a5-c30f-414d-a694-1a34468a3040-kube-api-access-zttvt\") pod \"4f9656a5-c30f-414d-a694-1a34468a3040\" (UID: \"4f9656a5-c30f-414d-a694-1a34468a3040\") " Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.870862 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f9656a5-c30f-414d-a694-1a34468a3040-operator-scripts\") pod \"4f9656a5-c30f-414d-a694-1a34468a3040\" (UID: \"4f9656a5-c30f-414d-a694-1a34468a3040\") " Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.870909 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3378093-f835-4a48-9c94-0196360494db-operator-scripts\") pod \"c3378093-f835-4a48-9c94-0196360494db\" (UID: \"c3378093-f835-4a48-9c94-0196360494db\") " Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.871997 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c3378093-f835-4a48-9c94-0196360494db-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c3378093-f835-4a48-9c94-0196360494db" (UID: "c3378093-f835-4a48-9c94-0196360494db"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.871998 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4f9656a5-c30f-414d-a694-1a34468a3040-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4f9656a5-c30f-414d-a694-1a34468a3040" (UID: "4f9656a5-c30f-414d-a694-1a34468a3040"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.877759 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f9656a5-c30f-414d-a694-1a34468a3040-kube-api-access-zttvt" (OuterVolumeSpecName: "kube-api-access-zttvt") pod "4f9656a5-c30f-414d-a694-1a34468a3040" (UID: "4f9656a5-c30f-414d-a694-1a34468a3040"). InnerVolumeSpecName "kube-api-access-zttvt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.878439 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3378093-f835-4a48-9c94-0196360494db-kube-api-access-czpqp" (OuterVolumeSpecName: "kube-api-access-czpqp") pod "c3378093-f835-4a48-9c94-0196360494db" (UID: "c3378093-f835-4a48-9c94-0196360494db"). InnerVolumeSpecName "kube-api-access-czpqp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.972431 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-czpqp\" (UniqueName: \"kubernetes.io/projected/c3378093-f835-4a48-9c94-0196360494db-kube-api-access-czpqp\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.972462 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zttvt\" (UniqueName: \"kubernetes.io/projected/4f9656a5-c30f-414d-a694-1a34468a3040-kube-api-access-zttvt\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.972471 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f9656a5-c30f-414d-a694-1a34468a3040-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:24 crc kubenswrapper[4929]: I1122 07:40:24.972480 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c3378093-f835-4a48-9c94-0196360494db-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.104983 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-wp4fv" Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.106475 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-wp4fv" event={"ID":"c3378093-f835-4a48-9c94-0196360494db","Type":"ContainerDied","Data":"cca17e817096a325c434639eb38d95309de9ebfd09c7a5233b0d99359d6a842f"} Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.106625 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cca17e817096a325c434639eb38d95309de9ebfd09c7a5233b0d99359d6a842f" Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.111526 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-x5n9p" event={"ID":"4f9656a5-c30f-414d-a694-1a34468a3040","Type":"ContainerDied","Data":"bbbbbc0cffba9969a2424a60ae574e5c72fee09e72f2b987c553c2487989c9a5"} Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.111572 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bbbbbc0cffba9969a2424a60ae574e5c72fee09e72f2b987c553c2487989c9a5" Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.111550 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-x5n9p" Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.114780 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-qnmbh" event={"ID":"ee12ebde-6711-471d-b83c-649a0523ce63","Type":"ContainerStarted","Data":"275a4d7fceba7baafdfc0d8e4ffe6a2579a4f981af864993334eb94ba5956b93"} Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.116247 4929 generic.go:334] "Generic (PLEG): container finished" podID="27746c3a-116c-4c6d-9697-9f44777a65be" containerID="78fd18e6de2c93f1ef21e8e3fbe08d40ffb8d363a02ef32525a8195ddc90ec18" exitCode=0 Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.116329 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-cbf9-account-create-6dk52" event={"ID":"27746c3a-116c-4c6d-9697-9f44777a65be","Type":"ContainerDied","Data":"78fd18e6de2c93f1ef21e8e3fbe08d40ffb8d363a02ef32525a8195ddc90ec18"} Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.121932 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4","Type":"ContainerStarted","Data":"1299cdf7a68e679fa82a5e8d04eb52180cafea7d476f20b35d9e0bdbb4e0cb9a"} Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.123818 4929 generic.go:334] "Generic (PLEG): container finished" podID="5a74e4b0-12f9-4d8f-a2b1-d8145c286d89" containerID="6d780ed71d07b9618e7e34ad3770d037b80d4c84018063b1f0c5fdb8d3740ef8" exitCode=0 Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.123905 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-9x644" event={"ID":"5a74e4b0-12f9-4d8f-a2b1-d8145c286d89","Type":"ContainerDied","Data":"6d780ed71d07b9618e7e34ad3770d037b80d4c84018063b1f0c5fdb8d3740ef8"} Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.125561 4929 generic.go:334] "Generic (PLEG): container finished" podID="ed5c1073-5bff-4d40-82fe-014200e5d8ca" containerID="c8153cd03e5828456a0438210663b0626a9287f391dd96f1367150607ed8e9ca" exitCode=0 Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.125588 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-01c3-account-create-s4ms5" event={"ID":"ed5c1073-5bff-4d40-82fe-014200e5d8ca","Type":"ContainerDied","Data":"c8153cd03e5828456a0438210663b0626a9287f391dd96f1367150607ed8e9ca"} Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.430914 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-245c-account-create-vgxsx" Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.583128 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-blq2j\" (UniqueName: \"kubernetes.io/projected/9986f3d9-5c31-45c9-be65-411d993ac709-kube-api-access-blq2j\") pod \"9986f3d9-5c31-45c9-be65-411d993ac709\" (UID: \"9986f3d9-5c31-45c9-be65-411d993ac709\") " Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.583316 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9986f3d9-5c31-45c9-be65-411d993ac709-operator-scripts\") pod \"9986f3d9-5c31-45c9-be65-411d993ac709\" (UID: \"9986f3d9-5c31-45c9-be65-411d993ac709\") " Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.584565 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9986f3d9-5c31-45c9-be65-411d993ac709-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9986f3d9-5c31-45c9-be65-411d993ac709" (UID: "9986f3d9-5c31-45c9-be65-411d993ac709"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.618154 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9986f3d9-5c31-45c9-be65-411d993ac709-kube-api-access-blq2j" (OuterVolumeSpecName: "kube-api-access-blq2j") pod "9986f3d9-5c31-45c9-be65-411d993ac709" (UID: "9986f3d9-5c31-45c9-be65-411d993ac709"). InnerVolumeSpecName "kube-api-access-blq2j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.685075 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9986f3d9-5c31-45c9-be65-411d993ac709-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:25 crc kubenswrapper[4929]: I1122 07:40:25.685109 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-blq2j\" (UniqueName: \"kubernetes.io/projected/9986f3d9-5c31-45c9-be65-411d993ac709-kube-api-access-blq2j\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:26 crc kubenswrapper[4929]: I1122 07:40:26.140528 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-245c-account-create-vgxsx" event={"ID":"9986f3d9-5c31-45c9-be65-411d993ac709","Type":"ContainerDied","Data":"43c294fe6e20164bc088ff41c68d4c68f42df57e11b4cc7b73f294f3952f39d6"} Nov 22 07:40:26 crc kubenswrapper[4929]: I1122 07:40:26.140567 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="43c294fe6e20164bc088ff41c68d4c68f42df57e11b4cc7b73f294f3952f39d6" Nov 22 07:40:26 crc kubenswrapper[4929]: I1122 07:40:26.140566 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-245c-account-create-vgxsx" Nov 22 07:40:26 crc kubenswrapper[4929]: I1122 07:40:26.145660 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4","Type":"ContainerStarted","Data":"98979f41bd7638d813d539655f699bda1e67b60eb80eb51bd015a5ef1b160565"} Nov 22 07:40:26 crc kubenswrapper[4929]: I1122 07:40:26.179089 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=17.179066245 podStartE2EDuration="17.179066245s" podCreationTimestamp="2025-11-22 07:40:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:40:26.174574023 +0000 UTC m=+1763.284028046" watchObservedRunningTime="2025-11-22 07:40:26.179066245 +0000 UTC m=+1763.288520258" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.247952 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-q7499"] Nov 22 07:40:27 crc kubenswrapper[4929]: E1122 07:40:27.248375 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f9656a5-c30f-414d-a694-1a34468a3040" containerName="mariadb-database-create" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.248388 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f9656a5-c30f-414d-a694-1a34468a3040" containerName="mariadb-database-create" Nov 22 07:40:27 crc kubenswrapper[4929]: E1122 07:40:27.248396 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9986f3d9-5c31-45c9-be65-411d993ac709" containerName="mariadb-account-create" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.248403 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="9986f3d9-5c31-45c9-be65-411d993ac709" containerName="mariadb-account-create" Nov 22 07:40:27 crc kubenswrapper[4929]: E1122 07:40:27.248427 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3378093-f835-4a48-9c94-0196360494db" containerName="mariadb-database-create" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.248434 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3378093-f835-4a48-9c94-0196360494db" containerName="mariadb-database-create" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.248587 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="9986f3d9-5c31-45c9-be65-411d993ac709" containerName="mariadb-account-create" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.248598 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f9656a5-c30f-414d-a694-1a34468a3040" containerName="mariadb-database-create" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.248619 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3378093-f835-4a48-9c94-0196360494db" containerName="mariadb-database-create" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.249168 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-q7499" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.257107 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-gdhfj" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.257304 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.258178 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-q7499"] Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.315166 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fj8kh\" (UniqueName: \"kubernetes.io/projected/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-kube-api-access-fj8kh\") pod \"glance-db-sync-q7499\" (UID: \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\") " pod="openstack/glance-db-sync-q7499" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.315239 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-combined-ca-bundle\") pod \"glance-db-sync-q7499\" (UID: \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\") " pod="openstack/glance-db-sync-q7499" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.315287 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-db-sync-config-data\") pod \"glance-db-sync-q7499\" (UID: \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\") " pod="openstack/glance-db-sync-q7499" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.315387 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-config-data\") pod \"glance-db-sync-q7499\" (UID: \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\") " pod="openstack/glance-db-sync-q7499" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.417392 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fj8kh\" (UniqueName: \"kubernetes.io/projected/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-kube-api-access-fj8kh\") pod \"glance-db-sync-q7499\" (UID: \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\") " pod="openstack/glance-db-sync-q7499" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.417447 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-combined-ca-bundle\") pod \"glance-db-sync-q7499\" (UID: \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\") " pod="openstack/glance-db-sync-q7499" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.417776 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-db-sync-config-data\") pod \"glance-db-sync-q7499\" (UID: \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\") " pod="openstack/glance-db-sync-q7499" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.418135 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-config-data\") pod 
\"glance-db-sync-q7499\" (UID: \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\") " pod="openstack/glance-db-sync-q7499" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.423019 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-config-data\") pod \"glance-db-sync-q7499\" (UID: \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\") " pod="openstack/glance-db-sync-q7499" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.426591 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-db-sync-config-data\") pod \"glance-db-sync-q7499\" (UID: \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\") " pod="openstack/glance-db-sync-q7499" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.436074 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-combined-ca-bundle\") pod \"glance-db-sync-q7499\" (UID: \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\") " pod="openstack/glance-db-sync-q7499" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.441146 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fj8kh\" (UniqueName: \"kubernetes.io/projected/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-kube-api-access-fj8kh\") pod \"glance-db-sync-q7499\" (UID: \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\") " pod="openstack/glance-db-sync-q7499" Nov 22 07:40:27 crc kubenswrapper[4929]: I1122 07:40:27.581300 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-q7499" Nov 22 07:40:29 crc kubenswrapper[4929]: I1122 07:40:29.997633 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-9x644" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.012091 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-cbf9-account-create-6dk52" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.023917 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-01c3-account-create-s4ms5" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.062990 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.082157 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gl552\" (UniqueName: \"kubernetes.io/projected/27746c3a-116c-4c6d-9697-9f44777a65be-kube-api-access-gl552\") pod \"27746c3a-116c-4c6d-9697-9f44777a65be\" (UID: \"27746c3a-116c-4c6d-9697-9f44777a65be\") " Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.082324 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a74e4b0-12f9-4d8f-a2b1-d8145c286d89-operator-scripts\") pod \"5a74e4b0-12f9-4d8f-a2b1-d8145c286d89\" (UID: \"5a74e4b0-12f9-4d8f-a2b1-d8145c286d89\") " Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.082353 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27746c3a-116c-4c6d-9697-9f44777a65be-operator-scripts\") pod \"27746c3a-116c-4c6d-9697-9f44777a65be\" (UID: \"27746c3a-116c-4c6d-9697-9f44777a65be\") " Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.082381 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzkqn\" (UniqueName: \"kubernetes.io/projected/5a74e4b0-12f9-4d8f-a2b1-d8145c286d89-kube-api-access-gzkqn\") pod \"5a74e4b0-12f9-4d8f-a2b1-d8145c286d89\" (UID: \"5a74e4b0-12f9-4d8f-a2b1-d8145c286d89\") " Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.084002 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a74e4b0-12f9-4d8f-a2b1-d8145c286d89-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5a74e4b0-12f9-4d8f-a2b1-d8145c286d89" (UID: "5a74e4b0-12f9-4d8f-a2b1-d8145c286d89"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.084015 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27746c3a-116c-4c6d-9697-9f44777a65be-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "27746c3a-116c-4c6d-9697-9f44777a65be" (UID: "27746c3a-116c-4c6d-9697-9f44777a65be"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.089161 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27746c3a-116c-4c6d-9697-9f44777a65be-kube-api-access-gl552" (OuterVolumeSpecName: "kube-api-access-gl552") pod "27746c3a-116c-4c6d-9697-9f44777a65be" (UID: "27746c3a-116c-4c6d-9697-9f44777a65be"). InnerVolumeSpecName "kube-api-access-gl552". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.089879 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a74e4b0-12f9-4d8f-a2b1-d8145c286d89-kube-api-access-gzkqn" (OuterVolumeSpecName: "kube-api-access-gzkqn") pod "5a74e4b0-12f9-4d8f-a2b1-d8145c286d89" (UID: "5a74e4b0-12f9-4d8f-a2b1-d8145c286d89"). InnerVolumeSpecName "kube-api-access-gzkqn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.184042 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed5c1073-5bff-4d40-82fe-014200e5d8ca-operator-scripts\") pod \"ed5c1073-5bff-4d40-82fe-014200e5d8ca\" (UID: \"ed5c1073-5bff-4d40-82fe-014200e5d8ca\") " Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.184114 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mn2h9\" (UniqueName: \"kubernetes.io/projected/ed5c1073-5bff-4d40-82fe-014200e5d8ca-kube-api-access-mn2h9\") pod \"ed5c1073-5bff-4d40-82fe-014200e5d8ca\" (UID: \"ed5c1073-5bff-4d40-82fe-014200e5d8ca\") " Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.184662 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed5c1073-5bff-4d40-82fe-014200e5d8ca-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ed5c1073-5bff-4d40-82fe-014200e5d8ca" (UID: "ed5c1073-5bff-4d40-82fe-014200e5d8ca"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.185060 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gl552\" (UniqueName: \"kubernetes.io/projected/27746c3a-116c-4c6d-9697-9f44777a65be-kube-api-access-gl552\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.185087 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed5c1073-5bff-4d40-82fe-014200e5d8ca-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.185097 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a74e4b0-12f9-4d8f-a2b1-d8145c286d89-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.185106 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27746c3a-116c-4c6d-9697-9f44777a65be-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.185114 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzkqn\" (UniqueName: \"kubernetes.io/projected/5a74e4b0-12f9-4d8f-a2b1-d8145c286d89-kube-api-access-gzkqn\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.187903 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed5c1073-5bff-4d40-82fe-014200e5d8ca-kube-api-access-mn2h9" (OuterVolumeSpecName: "kube-api-access-mn2h9") pod "ed5c1073-5bff-4d40-82fe-014200e5d8ca" (UID: "ed5c1073-5bff-4d40-82fe-014200e5d8ca"). InnerVolumeSpecName "kube-api-access-mn2h9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.201335 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-9x644" event={"ID":"5a74e4b0-12f9-4d8f-a2b1-d8145c286d89","Type":"ContainerDied","Data":"20d7a164d4131a7c2af37b0ba762dc70aa5134a242b1e045e82d3bdcf12fa0e3"} Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.201382 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="20d7a164d4131a7c2af37b0ba762dc70aa5134a242b1e045e82d3bdcf12fa0e3" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.201562 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-9x644" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.203233 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-01c3-account-create-s4ms5" event={"ID":"ed5c1073-5bff-4d40-82fe-014200e5d8ca","Type":"ContainerDied","Data":"d8f78668db902e0514fa7da7ac3f7090d0e26f1d6c20b724ab1f45c7f9e48e94"} Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.203270 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d8f78668db902e0514fa7da7ac3f7090d0e26f1d6c20b724ab1f45c7f9e48e94" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.203341 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-01c3-account-create-s4ms5" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.208226 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-cbf9-account-create-6dk52" event={"ID":"27746c3a-116c-4c6d-9697-9f44777a65be","Type":"ContainerDied","Data":"a7c6402cf8e0e1d96e83da27b106c419417a291c3b953a4b5f5fded8e3a4640d"} Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.208256 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a7c6402cf8e0e1d96e83da27b106c419417a291c3b953a4b5f5fded8e3a4640d" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.208259 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-cbf9-account-create-6dk52" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.286980 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mn2h9\" (UniqueName: \"kubernetes.io/projected/ed5c1073-5bff-4d40-82fe-014200e5d8ca-kube-api-access-mn2h9\") on node \"crc\" DevicePath \"\"" Nov 22 07:40:30 crc kubenswrapper[4929]: E1122 07:40:30.592083 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[etc-swift], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="openstack/swift-storage-0" podUID="dab37299-3b8e-46d0-b6a5-044f7d4878d6" Nov 22 07:40:30 crc kubenswrapper[4929]: I1122 07:40:30.947228 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" Nov 22 07:40:30 crc kubenswrapper[4929]: E1122 07:40:30.947625 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:40:31 crc kubenswrapper[4929]: I1122 07:40:31.215935 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 22 07:40:33 crc kubenswrapper[4929]: I1122 07:40:33.356635 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-q7499"] Nov 22 07:40:33 crc kubenswrapper[4929]: W1122 07:40:33.836609 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a027f20_aeb5_4af3_9ccc_c4271d8717d1.slice/crio-0fcf1d9e65a9e5d874c4fbe25d518af6331f955ee337bfa68e9799de1a783e80 WatchSource:0}: Error finding container 0fcf1d9e65a9e5d874c4fbe25d518af6331f955ee337bfa68e9799de1a783e80: Status 404 returned error can't find the container with id 0fcf1d9e65a9e5d874c4fbe25d518af6331f955ee337bfa68e9799de1a783e80 Nov 22 07:40:34 crc kubenswrapper[4929]: I1122 07:40:34.251037 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-q7499" event={"ID":"8a027f20-aeb5-4af3-9ccc-c4271d8717d1","Type":"ContainerStarted","Data":"0fcf1d9e65a9e5d874c4fbe25d518af6331f955ee337bfa68e9799de1a783e80"} Nov 22 07:40:34 crc kubenswrapper[4929]: I1122 07:40:34.308629 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zv95t" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" probeResult="failure" output=< Nov 22 07:40:34 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 07:40:34 crc kubenswrapper[4929]: > Nov 22 07:40:34 crc kubenswrapper[4929]: I1122 07:40:34.363344 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift\") pod \"swift-storage-0\" (UID: \"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:40:34 crc kubenswrapper[4929]: I1122 07:40:34.373911 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/dab37299-3b8e-46d0-b6a5-044f7d4878d6-etc-swift\") pod \"swift-storage-0\" (UID: 
\"dab37299-3b8e-46d0-b6a5-044f7d4878d6\") " pod="openstack/swift-storage-0" Nov 22 07:40:34 crc kubenswrapper[4929]: I1122 07:40:34.517903 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 22 07:40:35 crc kubenswrapper[4929]: I1122 07:40:35.260142 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-7tkl7" event={"ID":"f311300e-5d96-4731-99a1-1e072280db75","Type":"ContainerStarted","Data":"4b1b298eb5c447935d1e4943ddb38c83063db90eecf04053950540937ea089a3"} Nov 22 07:40:35 crc kubenswrapper[4929]: I1122 07:40:35.280037 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-7tkl7" podStartSLOduration=2.049920839 podStartE2EDuration="13.280014211s" podCreationTimestamp="2025-11-22 07:40:22 +0000 UTC" firstStartedPulling="2025-11-22 07:40:22.85372826 +0000 UTC m=+1759.963182273" lastFinishedPulling="2025-11-22 07:40:34.083821632 +0000 UTC m=+1771.193275645" observedRunningTime="2025-11-22 07:40:35.275790405 +0000 UTC m=+1772.385244438" watchObservedRunningTime="2025-11-22 07:40:35.280014211 +0000 UTC m=+1772.389468224" Nov 22 07:40:40 crc kubenswrapper[4929]: I1122 07:40:40.063514 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:40 crc kubenswrapper[4929]: I1122 07:40:40.070402 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:40 crc kubenswrapper[4929]: I1122 07:40:40.310452 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 22 07:40:44 crc kubenswrapper[4929]: I1122 07:40:44.317280 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zv95t" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" probeResult="failure" output=< Nov 22 07:40:44 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 07:40:44 crc kubenswrapper[4929]: > Nov 22 07:40:44 crc kubenswrapper[4929]: I1122 07:40:44.948233 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" Nov 22 07:40:44 crc kubenswrapper[4929]: E1122 07:40:44.948446 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:40:50 crc kubenswrapper[4929]: I1122 07:40:50.366748 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 22 07:40:50 crc kubenswrapper[4929]: W1122 07:40:50.381865 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddab37299_3b8e_46d0_b6a5_044f7d4878d6.slice/crio-d1204d39f2dcf3ccb05699c0d0084817e526198afb289c046615180e74ff8569 WatchSource:0}: Error finding container d1204d39f2dcf3ccb05699c0d0084817e526198afb289c046615180e74ff8569: Status 404 returned error can't find the container with id d1204d39f2dcf3ccb05699c0d0084817e526198afb289c046615180e74ff8569 Nov 22 07:40:50 crc kubenswrapper[4929]: I1122 
Nov 22 07:40:52 crc kubenswrapper[4929]: E1122 07:40:52.061149 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.196:5001/podified-epoxy-centos9/openstack-watcher-api:watcher_latest"
Nov 22 07:40:52 crc kubenswrapper[4929]: E1122 07:40:52.061481 4929 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.196:5001/podified-epoxy-centos9/openstack-watcher-api:watcher_latest"
Nov 22 07:40:52 crc kubenswrapper[4929]: E1122 07:40:52.061605 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:watcher-db-sync,Image:38.102.83.196:5001/podified-epoxy-centos9/openstack-watcher-api:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/watcher/watcher.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:watcher-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2nzrp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-db-sync-qnmbh_openstack(ee12ebde-6711-471d-b83c-649a0523ce63): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 22 07:40:52 crc kubenswrapper[4929]: E1122 07:40:52.063426 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/watcher-db-sync-qnmbh" podUID="ee12ebde-6711-471d-b83c-649a0523ce63"
pod="openstack/watcher-db-sync-qnmbh" podUID="ee12ebde-6711-471d-b83c-649a0523ce63" Nov 22 07:40:52 crc kubenswrapper[4929]: E1122 07:40:52.426432 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.196:5001/podified-epoxy-centos9/openstack-watcher-api:watcher_latest\\\"\"" pod="openstack/watcher-db-sync-qnmbh" podUID="ee12ebde-6711-471d-b83c-649a0523ce63" Nov 22 07:40:54 crc kubenswrapper[4929]: I1122 07:40:54.311497 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zv95t" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" probeResult="failure" output=< Nov 22 07:40:54 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 07:40:54 crc kubenswrapper[4929]: > Nov 22 07:40:54 crc kubenswrapper[4929]: I1122 07:40:54.311591 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zv95t" Nov 22 07:40:54 crc kubenswrapper[4929]: I1122 07:40:54.312269 4929 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="registry-server" containerStatusID={"Type":"cri-o","ID":"8433b6e1feb3faea1eee8a9fc8a5b45358f3fb339c0bbfa06155fbe031127099"} pod="openshift-marketplace/redhat-operators-zv95t" containerMessage="Container registry-server failed startup probe, will be restarted" Nov 22 07:40:54 crc kubenswrapper[4929]: I1122 07:40:54.312302 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zv95t" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" containerID="cri-o://8433b6e1feb3faea1eee8a9fc8a5b45358f3fb339c0bbfa06155fbe031127099" gracePeriod=30 Nov 22 07:40:59 crc kubenswrapper[4929]: I1122 07:40:59.947973 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" Nov 22 07:40:59 crc kubenswrapper[4929]: E1122 07:40:59.949093 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:41:02 crc kubenswrapper[4929]: E1122 07:41:02.406991 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Nov 22 07:41:02 crc kubenswrapper[4929]: E1122 07:41:02.407452 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fj8kh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-q7499_openstack(8a027f20-aeb5-4af3-9ccc-c4271d8717d1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:41:02 crc kubenswrapper[4929]: E1122 07:41:02.408666 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-q7499" podUID="8a027f20-aeb5-4af3-9ccc-c4271d8717d1" Nov 22 07:41:02 crc kubenswrapper[4929]: E1122 07:41:02.517410 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-q7499" podUID="8a027f20-aeb5-4af3-9ccc-c4271d8717d1" Nov 22 07:41:14 crc kubenswrapper[4929]: I1122 07:41:14.947842 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" Nov 22 07:41:14 crc kubenswrapper[4929]: E1122 07:41:14.948699 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:41:16 crc kubenswrapper[4929]: I1122 07:41:16.640510 4929 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-qnmbh" event={"ID":"ee12ebde-6711-471d-b83c-649a0523ce63","Type":"ContainerStarted","Data":"2540f1d1957de2e49d4a114ab9af4ada0b4550fe4476d830c90becc2d4907b2f"} Nov 22 07:41:26 crc kubenswrapper[4929]: I1122 07:41:26.735046 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zv95t_2584eaad-5c38-40d2-b1da-7a6268080fd0/registry-server/0.log" Nov 22 07:41:26 crc kubenswrapper[4929]: I1122 07:41:26.736732 4929 generic.go:334] "Generic (PLEG): container finished" podID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerID="8433b6e1feb3faea1eee8a9fc8a5b45358f3fb339c0bbfa06155fbe031127099" exitCode=137 Nov 22 07:41:26 crc kubenswrapper[4929]: I1122 07:41:26.736784 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zv95t" event={"ID":"2584eaad-5c38-40d2-b1da-7a6268080fd0","Type":"ContainerDied","Data":"8433b6e1feb3faea1eee8a9fc8a5b45358f3fb339c0bbfa06155fbe031127099"} Nov 22 07:41:29 crc kubenswrapper[4929]: I1122 07:41:29.947872 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" Nov 22 07:41:29 crc kubenswrapper[4929]: E1122 07:41:29.948558 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:41:44 crc kubenswrapper[4929]: I1122 07:41:44.947640 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" Nov 22 07:41:44 crc kubenswrapper[4929]: E1122 07:41:44.948428 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:41:49 crc kubenswrapper[4929]: E1122 07:41:49.333838 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-swift-account:current-podified" Nov 22 07:41:49 crc kubenswrapper[4929]: E1122 07:41:49.334514 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:account-server,Image:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,Command:[/usr/bin/swift-account-server /etc/swift/account-server.conf.d 
-v],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:account,HostPort:0,ContainerPort:6202,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5b7h56h9dh94h67bh697h95h55hbh555h556h675h5fdh57dh579h5fbh64fh5c9h687hb6h678h5d4h549h54h98h8ch564h5bh5bch55dhc8hf8q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:swift,ReadOnly:false,MountPath:/srv/node/pv,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:etc-swift,ReadOnly:false,MountPath:/etc/swift,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cache,ReadOnly:false,MountPath:/var/cache/swift,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:lock,ReadOnly:false,MountPath:/var/lock,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9wcmc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42445,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-storage-0_openstack(dab37299-3b8e-46d0-b6a5-044f7d4878d6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:41:54 crc kubenswrapper[4929]: I1122 07:41:54.024675 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zv95t_2584eaad-5c38-40d2-b1da-7a6268080fd0/registry-server/0.log" Nov 22 07:41:54 crc kubenswrapper[4929]: I1122 07:41:54.026156 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zv95t" event={"ID":"2584eaad-5c38-40d2-b1da-7a6268080fd0","Type":"ContainerStarted","Data":"6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe"} Nov 22 07:41:54 crc kubenswrapper[4929]: I1122 07:41:54.034195 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-q7499" event={"ID":"8a027f20-aeb5-4af3-9ccc-c4271d8717d1","Type":"ContainerStarted","Data":"5dde2d3f2168bf10e3c514fbd3d384a32beed85e1e86d848393685a49e346cca"} Nov 22 07:41:54 crc kubenswrapper[4929]: I1122 07:41:54.069764 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-q7499" podStartSLOduration=7.821815259 podStartE2EDuration="1m27.069737295s" podCreationTimestamp="2025-11-22 07:40:27 +0000 UTC" firstStartedPulling="2025-11-22 07:40:34.072407212 +0000 UTC m=+1771.181861225" lastFinishedPulling="2025-11-22 07:41:53.320329248 +0000 UTC m=+1850.429783261" observedRunningTime="2025-11-22 07:41:54.067883939 +0000 UTC m=+1851.177337992" watchObservedRunningTime="2025-11-22 07:41:54.069737295 +0000 UTC m=+1851.179191308" Nov 22 07:41:54 crc kubenswrapper[4929]: I1122 07:41:54.093202 4929 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/watcher-db-sync-qnmbh" podStartSLOduration=40.47843009 podStartE2EDuration="1m31.093172811s" podCreationTimestamp="2025-11-22 07:40:23 +0000 UTC" firstStartedPulling="2025-11-22 07:40:24.760345224 +0000 UTC m=+1761.869799227" lastFinishedPulling="2025-11-22 07:41:15.375087935 +0000 UTC m=+1812.484541948" observedRunningTime="2025-11-22 07:41:54.087151221 +0000 UTC m=+1851.196605234" watchObservedRunningTime="2025-11-22 07:41:54.093172811 +0000 UTC m=+1851.202626824" Nov 22 07:41:55 crc kubenswrapper[4929]: I1122 07:41:55.046334 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dab37299-3b8e-46d0-b6a5-044f7d4878d6","Type":"ContainerStarted","Data":"f8c1f565c57fe09dd2aa67f6f968fe4106629e78585d2c6132781d9dedfe3766"} Nov 22 07:41:55 crc kubenswrapper[4929]: I1122 07:41:55.046617 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dab37299-3b8e-46d0-b6a5-044f7d4878d6","Type":"ContainerStarted","Data":"a2b4b162b21c1273bdccff9380e43d8c2a9865d7249fafbea59652e5fec043fc"} Nov 22 07:41:56 crc kubenswrapper[4929]: I1122 07:41:56.057326 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dab37299-3b8e-46d0-b6a5-044f7d4878d6","Type":"ContainerStarted","Data":"ceec9ca9fb475d5b0fdce37e5aad2d52908b094956db36a8ba489c8380a3decd"} Nov 22 07:41:56 crc kubenswrapper[4929]: I1122 07:41:56.057579 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dab37299-3b8e-46d0-b6a5-044f7d4878d6","Type":"ContainerStarted","Data":"209096794cfe77ec47e4b72ce9a9f3ddb9344edb8350ae6b4fa2215093c15d6c"} Nov 22 07:41:58 crc kubenswrapper[4929]: I1122 07:41:58.095016 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dab37299-3b8e-46d0-b6a5-044f7d4878d6","Type":"ContainerStarted","Data":"236eb59cca598732f7059cf5f72da6de545b712f0a93cc5ba5c0554f15f8b231"} Nov 22 07:41:58 crc kubenswrapper[4929]: I1122 07:41:58.095097 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dab37299-3b8e-46d0-b6a5-044f7d4878d6","Type":"ContainerStarted","Data":"1c105d5e337905451cdf2660b13cbdbdf7cd9a18f8076472c04375fcad8f8415"} Nov 22 07:41:58 crc kubenswrapper[4929]: I1122 07:41:58.948302 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" Nov 22 07:41:58 crc kubenswrapper[4929]: E1122 07:41:58.948926 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:41:59 crc kubenswrapper[4929]: I1122 07:41:59.116545 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dab37299-3b8e-46d0-b6a5-044f7d4878d6","Type":"ContainerStarted","Data":"3afcbb18fd406db6ba3c732c5a0a118d018f366a571f2f4f6117bcc721a2b734"} Nov 22 07:41:59 crc kubenswrapper[4929]: I1122 07:41:59.116590 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"dab37299-3b8e-46d0-b6a5-044f7d4878d6","Type":"ContainerStarted","Data":"9d476859c8acc7873798d03492dfe6331f0d433d12364db69a0f6fefa42e4a0c"} Nov 22 07:41:59 crc kubenswrapper[4929]: I1122 07:41:59.116600 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dab37299-3b8e-46d0-b6a5-044f7d4878d6","Type":"ContainerStarted","Data":"462204e007786037a78241d01310b2202d1992d50cea6094239bada38a3b7ed6"} Nov 22 07:41:59 crc kubenswrapper[4929]: E1122 07:41:59.853708 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"account-server\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"account-replicator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-account:current-podified\\\"\", failed to \"StartContainer\" for \"account-auditor\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-account:current-podified\\\"\", failed to \"StartContainer\" for \"account-reaper\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-account:current-podified\\\"\"]" pod="openstack/swift-storage-0" podUID="dab37299-3b8e-46d0-b6a5-044f7d4878d6" Nov 22 07:42:00 crc kubenswrapper[4929]: I1122 07:42:00.129087 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dab37299-3b8e-46d0-b6a5-044f7d4878d6","Type":"ContainerStarted","Data":"a34f28d4d4f4cd14a6d1c5d26d8bcd93968f8dcb05bbec70374c9b9a74233453"} Nov 22 07:42:00 crc kubenswrapper[4929]: I1122 07:42:00.129137 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dab37299-3b8e-46d0-b6a5-044f7d4878d6","Type":"ContainerStarted","Data":"062d2338940d2a957d3d4c9d983467816430f10a37469d14353d8fb1b8b3ca8f"} Nov 22 07:42:02 crc kubenswrapper[4929]: I1122 07:42:02.174598 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dab37299-3b8e-46d0-b6a5-044f7d4878d6","Type":"ContainerStarted","Data":"3890a2a9f523df1af54720b7620c2ca73bd20f3fcb4cef09d84b674c1227f34c"} Nov 22 07:42:02 crc kubenswrapper[4929]: I1122 07:42:02.175081 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dab37299-3b8e-46d0-b6a5-044f7d4878d6","Type":"ContainerStarted","Data":"b2958858ce1e7d035a7dae209748a211618b8dd10d0b09e17799c70797f90cf3"} Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.188147 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dab37299-3b8e-46d0-b6a5-044f7d4878d6","Type":"ContainerStarted","Data":"7a6b01e4fb64f49069ee055ebcdd60412b17e20b1ba5f3c99e206424879fcd1e"} Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.188555 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"dab37299-3b8e-46d0-b6a5-044f7d4878d6","Type":"ContainerStarted","Data":"04be4d224c3412e687ef8ae22fee638bbc12d078de1ba871d38b2248700d3d38"} Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.220192 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=269.106533985 podStartE2EDuration="5m40.220173971s" podCreationTimestamp="2025-11-22 07:36:23 +0000 UTC" firstStartedPulling="2025-11-22 07:40:50.38407738 +0000 UTC m=+1787.493531413" 
lastFinishedPulling="2025-11-22 07:42:01.497717376 +0000 UTC m=+1858.607171399" observedRunningTime="2025-11-22 07:42:03.217914705 +0000 UTC m=+1860.327368728" watchObservedRunningTime="2025-11-22 07:42:03.220173971 +0000 UTC m=+1860.329627984" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.261581 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zv95t" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.261667 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zv95t" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.516959 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-n4sjl"] Nov 22 07:42:03 crc kubenswrapper[4929]: E1122 07:42:03.517609 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed5c1073-5bff-4d40-82fe-014200e5d8ca" containerName="mariadb-account-create" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.517637 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed5c1073-5bff-4d40-82fe-014200e5d8ca" containerName="mariadb-account-create" Nov 22 07:42:03 crc kubenswrapper[4929]: E1122 07:42:03.517660 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27746c3a-116c-4c6d-9697-9f44777a65be" containerName="mariadb-account-create" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.517670 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="27746c3a-116c-4c6d-9697-9f44777a65be" containerName="mariadb-account-create" Nov 22 07:42:03 crc kubenswrapper[4929]: E1122 07:42:03.517708 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a74e4b0-12f9-4d8f-a2b1-d8145c286d89" containerName="mariadb-database-create" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.517716 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a74e4b0-12f9-4d8f-a2b1-d8145c286d89" containerName="mariadb-database-create" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.517916 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="27746c3a-116c-4c6d-9697-9f44777a65be" containerName="mariadb-account-create" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.517944 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed5c1073-5bff-4d40-82fe-014200e5d8ca" containerName="mariadb-account-create" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.517967 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a74e4b0-12f9-4d8f-a2b1-d8145c286d89" containerName="mariadb-database-create" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.519445 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.523517 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.532634 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-n4sjl"] Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.706298 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-n4sjl\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.706417 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-n4sjl\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.706593 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-config\") pod \"dnsmasq-dns-764c5664d7-n4sjl\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.706628 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-dns-svc\") pod \"dnsmasq-dns-764c5664d7-n4sjl\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.706677 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-n4sjl\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.706714 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jssz\" (UniqueName: \"kubernetes.io/projected/af6c6154-1378-401d-b568-cf37274933fb-kube-api-access-4jssz\") pod \"dnsmasq-dns-764c5664d7-n4sjl\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.808592 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-n4sjl\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.808708 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-config\") pod \"dnsmasq-dns-764c5664d7-n4sjl\" (UID: 
\"af6c6154-1378-401d-b568-cf37274933fb\") " pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.808726 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-dns-svc\") pod \"dnsmasq-dns-764c5664d7-n4sjl\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.808754 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-n4sjl\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.808777 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jssz\" (UniqueName: \"kubernetes.io/projected/af6c6154-1378-401d-b568-cf37274933fb-kube-api-access-4jssz\") pod \"dnsmasq-dns-764c5664d7-n4sjl\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.808808 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-n4sjl\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.809832 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-n4sjl\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.809838 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-n4sjl\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.809994 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-n4sjl\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.810068 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-dns-svc\") pod \"dnsmasq-dns-764c5664d7-n4sjl\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.810180 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-config\") pod \"dnsmasq-dns-764c5664d7-n4sjl\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 
07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.827713 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jssz\" (UniqueName: \"kubernetes.io/projected/af6c6154-1378-401d-b568-cf37274933fb-kube-api-access-4jssz\") pod \"dnsmasq-dns-764c5664d7-n4sjl\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:03 crc kubenswrapper[4929]: I1122 07:42:03.847674 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:04 crc kubenswrapper[4929]: I1122 07:42:04.321812 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zv95t" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" probeResult="failure" output=< Nov 22 07:42:04 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 07:42:04 crc kubenswrapper[4929]: > Nov 22 07:42:04 crc kubenswrapper[4929]: I1122 07:42:04.373655 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-n4sjl"] Nov 22 07:42:04 crc kubenswrapper[4929]: W1122 07:42:04.376387 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf6c6154_1378_401d_b568_cf37274933fb.slice/crio-8ef5eb293108ae985966fed0d0400d96793197618513e9b7d25253ca443096f2 WatchSource:0}: Error finding container 8ef5eb293108ae985966fed0d0400d96793197618513e9b7d25253ca443096f2: Status 404 returned error can't find the container with id 8ef5eb293108ae985966fed0d0400d96793197618513e9b7d25253ca443096f2 Nov 22 07:42:05 crc kubenswrapper[4929]: I1122 07:42:05.205575 4929 generic.go:334] "Generic (PLEG): container finished" podID="af6c6154-1378-401d-b568-cf37274933fb" containerID="cab09343d115a2448c8c76416b2d6c3072b1883ba540c203b2baee517e5cc447" exitCode=0 Nov 22 07:42:05 crc kubenswrapper[4929]: I1122 07:42:05.205665 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" event={"ID":"af6c6154-1378-401d-b568-cf37274933fb","Type":"ContainerDied","Data":"cab09343d115a2448c8c76416b2d6c3072b1883ba540c203b2baee517e5cc447"} Nov 22 07:42:05 crc kubenswrapper[4929]: I1122 07:42:05.205920 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" event={"ID":"af6c6154-1378-401d-b568-cf37274933fb","Type":"ContainerStarted","Data":"8ef5eb293108ae985966fed0d0400d96793197618513e9b7d25253ca443096f2"} Nov 22 07:42:06 crc kubenswrapper[4929]: I1122 07:42:06.214693 4929 generic.go:334] "Generic (PLEG): container finished" podID="ee12ebde-6711-471d-b83c-649a0523ce63" containerID="2540f1d1957de2e49d4a114ab9af4ada0b4550fe4476d830c90becc2d4907b2f" exitCode=0 Nov 22 07:42:06 crc kubenswrapper[4929]: I1122 07:42:06.214785 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-qnmbh" event={"ID":"ee12ebde-6711-471d-b83c-649a0523ce63","Type":"ContainerDied","Data":"2540f1d1957de2e49d4a114ab9af4ada0b4550fe4476d830c90becc2d4907b2f"} Nov 22 07:42:06 crc kubenswrapper[4929]: I1122 07:42:06.216858 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" event={"ID":"af6c6154-1378-401d-b568-cf37274933fb","Type":"ContainerStarted","Data":"4b666bf0a3163d13f9cc34e907110d44f4185d201360a35ceb73de743f89276b"} Nov 22 07:42:06 crc kubenswrapper[4929]: I1122 07:42:06.216992 4929 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:06 crc kubenswrapper[4929]: I1122 07:42:06.250100 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" podStartSLOduration=3.25008435 podStartE2EDuration="3.25008435s" podCreationTimestamp="2025-11-22 07:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:42:06.248704105 +0000 UTC m=+1863.358158138" watchObservedRunningTime="2025-11-22 07:42:06.25008435 +0000 UTC m=+1863.359538363" Nov 22 07:42:07 crc kubenswrapper[4929]: I1122 07:42:07.552565 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-qnmbh" Nov 22 07:42:07 crc kubenswrapper[4929]: I1122 07:42:07.702656 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee12ebde-6711-471d-b83c-649a0523ce63-config-data\") pod \"ee12ebde-6711-471d-b83c-649a0523ce63\" (UID: \"ee12ebde-6711-471d-b83c-649a0523ce63\") " Nov 22 07:42:07 crc kubenswrapper[4929]: I1122 07:42:07.702732 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ee12ebde-6711-471d-b83c-649a0523ce63-db-sync-config-data\") pod \"ee12ebde-6711-471d-b83c-649a0523ce63\" (UID: \"ee12ebde-6711-471d-b83c-649a0523ce63\") " Nov 22 07:42:07 crc kubenswrapper[4929]: I1122 07:42:07.702882 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2nzrp\" (UniqueName: \"kubernetes.io/projected/ee12ebde-6711-471d-b83c-649a0523ce63-kube-api-access-2nzrp\") pod \"ee12ebde-6711-471d-b83c-649a0523ce63\" (UID: \"ee12ebde-6711-471d-b83c-649a0523ce63\") " Nov 22 07:42:07 crc kubenswrapper[4929]: I1122 07:42:07.702909 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee12ebde-6711-471d-b83c-649a0523ce63-combined-ca-bundle\") pod \"ee12ebde-6711-471d-b83c-649a0523ce63\" (UID: \"ee12ebde-6711-471d-b83c-649a0523ce63\") " Nov 22 07:42:07 crc kubenswrapper[4929]: I1122 07:42:07.707697 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee12ebde-6711-471d-b83c-649a0523ce63-kube-api-access-2nzrp" (OuterVolumeSpecName: "kube-api-access-2nzrp") pod "ee12ebde-6711-471d-b83c-649a0523ce63" (UID: "ee12ebde-6711-471d-b83c-649a0523ce63"). InnerVolumeSpecName "kube-api-access-2nzrp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:42:07 crc kubenswrapper[4929]: I1122 07:42:07.707906 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee12ebde-6711-471d-b83c-649a0523ce63-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "ee12ebde-6711-471d-b83c-649a0523ce63" (UID: "ee12ebde-6711-471d-b83c-649a0523ce63"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:42:07 crc kubenswrapper[4929]: I1122 07:42:07.728265 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee12ebde-6711-471d-b83c-649a0523ce63-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ee12ebde-6711-471d-b83c-649a0523ce63" (UID: "ee12ebde-6711-471d-b83c-649a0523ce63"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:42:07 crc kubenswrapper[4929]: I1122 07:42:07.759089 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee12ebde-6711-471d-b83c-649a0523ce63-config-data" (OuterVolumeSpecName: "config-data") pod "ee12ebde-6711-471d-b83c-649a0523ce63" (UID: "ee12ebde-6711-471d-b83c-649a0523ce63"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:42:07 crc kubenswrapper[4929]: I1122 07:42:07.805001 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee12ebde-6711-471d-b83c-649a0523ce63-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:07 crc kubenswrapper[4929]: I1122 07:42:07.805035 4929 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ee12ebde-6711-471d-b83c-649a0523ce63-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:07 crc kubenswrapper[4929]: I1122 07:42:07.805048 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2nzrp\" (UniqueName: \"kubernetes.io/projected/ee12ebde-6711-471d-b83c-649a0523ce63-kube-api-access-2nzrp\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:07 crc kubenswrapper[4929]: I1122 07:42:07.805057 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee12ebde-6711-471d-b83c-649a0523ce63-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:08 crc kubenswrapper[4929]: I1122 07:42:08.242900 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-qnmbh" event={"ID":"ee12ebde-6711-471d-b83c-649a0523ce63","Type":"ContainerDied","Data":"275a4d7fceba7baafdfc0d8e4ffe6a2579a4f981af864993334eb94ba5956b93"} Nov 22 07:42:08 crc kubenswrapper[4929]: I1122 07:42:08.243168 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="275a4d7fceba7baafdfc0d8e4ffe6a2579a4f981af864993334eb94ba5956b93" Nov 22 07:42:08 crc kubenswrapper[4929]: I1122 07:42:08.243048 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-sync-qnmbh" Nov 22 07:42:12 crc kubenswrapper[4929]: I1122 07:42:12.947604 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" Nov 22 07:42:12 crc kubenswrapper[4929]: E1122 07:42:12.948389 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:42:13 crc kubenswrapper[4929]: I1122 07:42:13.313156 4929 generic.go:334] "Generic (PLEG): container finished" podID="f311300e-5d96-4731-99a1-1e072280db75" containerID="4b1b298eb5c447935d1e4943ddb38c83063db90eecf04053950540937ea089a3" exitCode=0 Nov 22 07:42:13 crc kubenswrapper[4929]: I1122 07:42:13.313281 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-7tkl7" event={"ID":"f311300e-5d96-4731-99a1-1e072280db75","Type":"ContainerDied","Data":"4b1b298eb5c447935d1e4943ddb38c83063db90eecf04053950540937ea089a3"} Nov 22 07:42:13 crc kubenswrapper[4929]: I1122 07:42:13.861344 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:13 crc kubenswrapper[4929]: I1122 07:42:13.974455 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-zg7gb"] Nov 22 07:42:13 crc kubenswrapper[4929]: I1122 07:42:13.974655 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-zg7gb" podUID="3d1bd063-db18-4012-85a9-fc270e6d5782" containerName="dnsmasq-dns" containerID="cri-o://dda67dbf58cd6aef06969937f8f65ded435aba5e9af21ebf9d55c32e5ee7af24" gracePeriod=10 Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.324867 4929 scope.go:117] "RemoveContainer" containerID="36222877e144232ecdd692b0b7de2d92d9e0f9677a1af113741cc346834fe9f3" Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.338088 4929 generic.go:334] "Generic (PLEG): container finished" podID="3d1bd063-db18-4012-85a9-fc270e6d5782" containerID="dda67dbf58cd6aef06969937f8f65ded435aba5e9af21ebf9d55c32e5ee7af24" exitCode=0 Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.338928 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-zg7gb" event={"ID":"3d1bd063-db18-4012-85a9-fc270e6d5782","Type":"ContainerDied","Data":"dda67dbf58cd6aef06969937f8f65ded435aba5e9af21ebf9d55c32e5ee7af24"} Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.344349 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zv95t" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" probeResult="failure" output=< Nov 22 07:42:14 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 07:42:14 crc kubenswrapper[4929]: > Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.392645 4929 scope.go:117] "RemoveContainer" containerID="40a0e5f6b9c084a4f6a9185a702487e79145b2ca8b7d9ae822ba278aec69a344" Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.537337 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.576884 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c8557\" (UniqueName: \"kubernetes.io/projected/3d1bd063-db18-4012-85a9-fc270e6d5782-kube-api-access-c8557\") pod \"3d1bd063-db18-4012-85a9-fc270e6d5782\" (UID: \"3d1bd063-db18-4012-85a9-fc270e6d5782\") " Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.577007 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-dns-svc\") pod \"3d1bd063-db18-4012-85a9-fc270e6d5782\" (UID: \"3d1bd063-db18-4012-85a9-fc270e6d5782\") " Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.577175 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-ovsdbserver-sb\") pod \"3d1bd063-db18-4012-85a9-fc270e6d5782\" (UID: \"3d1bd063-db18-4012-85a9-fc270e6d5782\") " Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.577271 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-ovsdbserver-nb\") pod \"3d1bd063-db18-4012-85a9-fc270e6d5782\" (UID: \"3d1bd063-db18-4012-85a9-fc270e6d5782\") " Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.577349 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-config\") pod \"3d1bd063-db18-4012-85a9-fc270e6d5782\" (UID: \"3d1bd063-db18-4012-85a9-fc270e6d5782\") " Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.617165 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d1bd063-db18-4012-85a9-fc270e6d5782-kube-api-access-c8557" (OuterVolumeSpecName: "kube-api-access-c8557") pod "3d1bd063-db18-4012-85a9-fc270e6d5782" (UID: "3d1bd063-db18-4012-85a9-fc270e6d5782"). InnerVolumeSpecName "kube-api-access-c8557". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.656437 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3d1bd063-db18-4012-85a9-fc270e6d5782" (UID: "3d1bd063-db18-4012-85a9-fc270e6d5782"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.656455 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-config" (OuterVolumeSpecName: "config") pod "3d1bd063-db18-4012-85a9-fc270e6d5782" (UID: "3d1bd063-db18-4012-85a9-fc270e6d5782"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.660019 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3d1bd063-db18-4012-85a9-fc270e6d5782" (UID: "3d1bd063-db18-4012-85a9-fc270e6d5782"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.663711 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-7tkl7" Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.674799 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3d1bd063-db18-4012-85a9-fc270e6d5782" (UID: "3d1bd063-db18-4012-85a9-fc270e6d5782"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.680769 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-99n9b\" (UniqueName: \"kubernetes.io/projected/f311300e-5d96-4731-99a1-1e072280db75-kube-api-access-99n9b\") pod \"f311300e-5d96-4731-99a1-1e072280db75\" (UID: \"f311300e-5d96-4731-99a1-1e072280db75\") " Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.680938 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f311300e-5d96-4731-99a1-1e072280db75-combined-ca-bundle\") pod \"f311300e-5d96-4731-99a1-1e072280db75\" (UID: \"f311300e-5d96-4731-99a1-1e072280db75\") " Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.680997 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f311300e-5d96-4731-99a1-1e072280db75-config-data\") pod \"f311300e-5d96-4731-99a1-1e072280db75\" (UID: \"f311300e-5d96-4731-99a1-1e072280db75\") " Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.681366 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.681379 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c8557\" (UniqueName: \"kubernetes.io/projected/3d1bd063-db18-4012-85a9-fc270e6d5782-kube-api-access-c8557\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.681388 4929 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.681397 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.681405 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d1bd063-db18-4012-85a9-fc270e6d5782-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.685787 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f311300e-5d96-4731-99a1-1e072280db75-kube-api-access-99n9b" (OuterVolumeSpecName: "kube-api-access-99n9b") pod "f311300e-5d96-4731-99a1-1e072280db75" (UID: "f311300e-5d96-4731-99a1-1e072280db75"). InnerVolumeSpecName "kube-api-access-99n9b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.706557 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f311300e-5d96-4731-99a1-1e072280db75-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f311300e-5d96-4731-99a1-1e072280db75" (UID: "f311300e-5d96-4731-99a1-1e072280db75"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.729075 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f311300e-5d96-4731-99a1-1e072280db75-config-data" (OuterVolumeSpecName: "config-data") pod "f311300e-5d96-4731-99a1-1e072280db75" (UID: "f311300e-5d96-4731-99a1-1e072280db75"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.784052 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f311300e-5d96-4731-99a1-1e072280db75-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.784110 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f311300e-5d96-4731-99a1-1e072280db75-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:14 crc kubenswrapper[4929]: I1122 07:42:14.784124 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-99n9b\" (UniqueName: \"kubernetes.io/projected/f311300e-5d96-4731-99a1-1e072280db75-kube-api-access-99n9b\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.350868 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-zg7gb" event={"ID":"3d1bd063-db18-4012-85a9-fc270e6d5782","Type":"ContainerDied","Data":"22ef5a01ff33c5e178b59af73abc9e2d451f41a04c86ead077aadfc127d5094c"} Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.350920 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-zg7gb" Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.350926 4929 scope.go:117] "RemoveContainer" containerID="dda67dbf58cd6aef06969937f8f65ded435aba5e9af21ebf9d55c32e5ee7af24" Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.352585 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-7tkl7" event={"ID":"f311300e-5d96-4731-99a1-1e072280db75","Type":"ContainerDied","Data":"14c3f081825b0d1688754194974124560d9efac173d14e974a310dab3676cb67"} Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.352611 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-7tkl7" Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.352622 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="14c3f081825b0d1688754194974124560d9efac173d14e974a310dab3676cb67" Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.378475 4929 scope.go:117] "RemoveContainer" containerID="d18510dfc3a98f4a1de161d507cdd6bf21b0f4714b14716131c4c0882b470d35" Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.416926 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-zg7gb"] Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.425845 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-zg7gb"] Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.628845 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-499ph"] Nov 22 07:42:15 crc kubenswrapper[4929]: E1122 07:42:15.629623 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee12ebde-6711-471d-b83c-649a0523ce63" containerName="watcher-db-sync" Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.629651 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee12ebde-6711-471d-b83c-649a0523ce63" containerName="watcher-db-sync" Nov 22 07:42:15 crc kubenswrapper[4929]: E1122 07:42:15.629688 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d1bd063-db18-4012-85a9-fc270e6d5782" containerName="dnsmasq-dns" Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.629695 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d1bd063-db18-4012-85a9-fc270e6d5782" containerName="dnsmasq-dns" Nov 22 07:42:15 crc kubenswrapper[4929]: E1122 07:42:15.629721 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f311300e-5d96-4731-99a1-1e072280db75" containerName="keystone-db-sync" Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.629729 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="f311300e-5d96-4731-99a1-1e072280db75" containerName="keystone-db-sync" Nov 22 07:42:15 crc kubenswrapper[4929]: E1122 07:42:15.629756 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d1bd063-db18-4012-85a9-fc270e6d5782" containerName="init" Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.629763 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d1bd063-db18-4012-85a9-fc270e6d5782" containerName="init" Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.629982 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d1bd063-db18-4012-85a9-fc270e6d5782" containerName="dnsmasq-dns" Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.630003 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee12ebde-6711-471d-b83c-649a0523ce63" containerName="watcher-db-sync" Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.630026 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="f311300e-5d96-4731-99a1-1e072280db75" containerName="keystone-db-sync" Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.631713 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.642824 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-8gmxj"]
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.644273 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.648157 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.649151 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-jg2tk"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.649502 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.649619 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.649735 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.657031 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-499ph"]
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.665867 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8gmxj"]
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.703379 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-ovsdbserver-nb\") pod \"dnsmasq-dns-5959f8865f-499ph\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.703435 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rwbc\" (UniqueName: \"kubernetes.io/projected/e25ee40c-5b15-4aff-9007-efc343313d20-kube-api-access-9rwbc\") pod \"dnsmasq-dns-5959f8865f-499ph\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.703572 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-config-data\") pod \"keystone-bootstrap-8gmxj\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.703746 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-dns-swift-storage-0\") pod \"dnsmasq-dns-5959f8865f-499ph\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.703838 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-fernet-keys\") pod \"keystone-bootstrap-8gmxj\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.703918 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-combined-ca-bundle\") pod \"keystone-bootstrap-8gmxj\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.703970 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8vq8\" (UniqueName: \"kubernetes.io/projected/18461154-ba2d-496b-a4a8-0f14f91b64af-kube-api-access-z8vq8\") pod \"keystone-bootstrap-8gmxj\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.703998 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-config\") pod \"dnsmasq-dns-5959f8865f-499ph\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.704036 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-dns-svc\") pod \"dnsmasq-dns-5959f8865f-499ph\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.704062 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-credential-keys\") pod \"keystone-bootstrap-8gmxj\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.704090 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-ovsdbserver-sb\") pod \"dnsmasq-dns-5959f8865f-499ph\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.704139 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-scripts\") pod \"keystone-bootstrap-8gmxj\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.757971 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"]
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.759671 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.765695 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.765933 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-wlm2p"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.767129 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-applier-0"]
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.770965 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.774480 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-applier-config-data"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.783556 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"]
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.805382 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"]
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.806850 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.809961 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813045 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/524a1cf1-3627-4e2e-ae71-7648ba4462fa-logs\") pod \"watcher-applier-0\" (UID: \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\") " pod="openstack/watcher-applier-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813102 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-ovsdbserver-nb\") pod \"dnsmasq-dns-5959f8865f-499ph\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813132 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rwbc\" (UniqueName: \"kubernetes.io/projected/e25ee40c-5b15-4aff-9007-efc343313d20-kube-api-access-9rwbc\") pod \"dnsmasq-dns-5959f8865f-499ph\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813160 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-config-data\") pod \"keystone-bootstrap-8gmxj\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813225 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-dns-swift-storage-0\") pod \"dnsmasq-dns-5959f8865f-499ph\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813264 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3e41e21e-417a-4355-904b-8a2ff7ef19b5-logs\") pod \"watcher-api-0\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " pod="openstack/watcher-api-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813292 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-fernet-keys\") pod \"keystone-bootstrap-8gmxj\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813315 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/524a1cf1-3627-4e2e-ae71-7648ba4462fa-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\") " pod="openstack/watcher-applier-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813356 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mmpb\" (UniqueName: \"kubernetes.io/projected/3e41e21e-417a-4355-904b-8a2ff7ef19b5-kube-api-access-6mmpb\") pod \"watcher-api-0\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " pod="openstack/watcher-api-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813385 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-combined-ca-bundle\") pod \"keystone-bootstrap-8gmxj\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813432 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8vq8\" (UniqueName: \"kubernetes.io/projected/18461154-ba2d-496b-a4a8-0f14f91b64af-kube-api-access-z8vq8\") pod \"keystone-bootstrap-8gmxj\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813458 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-config\") pod \"dnsmasq-dns-5959f8865f-499ph\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813483 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3e41e21e-417a-4355-904b-8a2ff7ef19b5-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " pod="openstack/watcher-api-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813509 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6zvj\" (UniqueName: \"kubernetes.io/projected/524a1cf1-3627-4e2e-ae71-7648ba4462fa-kube-api-access-j6zvj\") pod \"watcher-applier-0\" (UID: \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\") " pod="openstack/watcher-applier-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813536 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-dns-svc\") pod \"dnsmasq-dns-5959f8865f-499ph\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813563 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-credential-keys\") pod \"keystone-bootstrap-8gmxj\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813591 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e41e21e-417a-4355-904b-8a2ff7ef19b5-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " pod="openstack/watcher-api-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813614 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-ovsdbserver-sb\") pod \"dnsmasq-dns-5959f8865f-499ph\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813667 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-scripts\") pod \"keystone-bootstrap-8gmxj\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813716 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e41e21e-417a-4355-904b-8a2ff7ef19b5-config-data\") pod \"watcher-api-0\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " pod="openstack/watcher-api-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.813749 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/524a1cf1-3627-4e2e-ae71-7648ba4462fa-config-data\") pod \"watcher-applier-0\" (UID: \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\") " pod="openstack/watcher-applier-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.814825 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-ovsdbserver-nb\") pod \"dnsmasq-dns-5959f8865f-499ph\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.815198 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-dns-swift-storage-0\") pod \"dnsmasq-dns-5959f8865f-499ph\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.815808 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-dns-svc\") pod \"dnsmasq-dns-5959f8865f-499ph\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.815886 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-config\") pod \"dnsmasq-dns-5959f8865f-499ph\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.830452 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-credential-keys\") pod \"keystone-bootstrap-8gmxj\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.831973 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-config-data\") pod \"keystone-bootstrap-8gmxj\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.837013 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-ovsdbserver-sb\") pod \"dnsmasq-dns-5959f8865f-499ph\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.845012 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-fernet-keys\") pod \"keystone-bootstrap-8gmxj\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.849534 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-scripts\") pod \"keystone-bootstrap-8gmxj\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.855649 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"]
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.857872 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rwbc\" (UniqueName: \"kubernetes.io/projected/e25ee40c-5b15-4aff-9007-efc343313d20-kube-api-access-9rwbc\") pod \"dnsmasq-dns-5959f8865f-499ph\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.858089 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8vq8\" (UniqueName: \"kubernetes.io/projected/18461154-ba2d-496b-a4a8-0f14f91b64af-kube-api-access-z8vq8\") pod \"keystone-bootstrap-8gmxj\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.858597 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-combined-ca-bundle\") pod \"keystone-bootstrap-8gmxj\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.879000 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"]
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.914749 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e41e21e-417a-4355-904b-8a2ff7ef19b5-config-data\") pod \"watcher-api-0\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " pod="openstack/watcher-api-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.914793 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/524a1cf1-3627-4e2e-ae71-7648ba4462fa-config-data\") pod \"watcher-applier-0\" (UID: \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\") " pod="openstack/watcher-applier-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.914828 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/57774149-a18c-475d-ad44-23687ced2981-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " pod="openstack/watcher-decision-engine-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.914848 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/524a1cf1-3627-4e2e-ae71-7648ba4462fa-logs\") pod \"watcher-applier-0\" (UID: \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\") " pod="openstack/watcher-applier-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.914891 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3e41e21e-417a-4355-904b-8a2ff7ef19b5-logs\") pod \"watcher-api-0\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " pod="openstack/watcher-api-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.914911 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/524a1cf1-3627-4e2e-ae71-7648ba4462fa-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\") " pod="openstack/watcher-applier-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.914928 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-578qq\" (UniqueName: \"kubernetes.io/projected/57774149-a18c-475d-ad44-23687ced2981-kube-api-access-578qq\") pod \"watcher-decision-engine-0\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " pod="openstack/watcher-decision-engine-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.914953 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mmpb\" (UniqueName: \"kubernetes.io/projected/3e41e21e-417a-4355-904b-8a2ff7ef19b5-kube-api-access-6mmpb\") pod \"watcher-api-0\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " pod="openstack/watcher-api-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.914981 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3e41e21e-417a-4355-904b-8a2ff7ef19b5-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " pod="openstack/watcher-api-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.914999 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6zvj\" (UniqueName: \"kubernetes.io/projected/524a1cf1-3627-4e2e-ae71-7648ba4462fa-kube-api-access-j6zvj\") pod \"watcher-applier-0\" (UID: \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\") " pod="openstack/watcher-applier-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.915038 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e41e21e-417a-4355-904b-8a2ff7ef19b5-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " pod="openstack/watcher-api-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.915067 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57774149-a18c-475d-ad44-23687ced2981-config-data\") pod \"watcher-decision-engine-0\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " pod="openstack/watcher-decision-engine-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.915082 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57774149-a18c-475d-ad44-23687ced2981-logs\") pod \"watcher-decision-engine-0\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " pod="openstack/watcher-decision-engine-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.915103 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57774149-a18c-475d-ad44-23687ced2981-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " pod="openstack/watcher-decision-engine-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.915140 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-69b66b77c7-hh8cg"]
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.917071 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-69b66b77c7-hh8cg"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.922092 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/524a1cf1-3627-4e2e-ae71-7648ba4462fa-logs\") pod \"watcher-applier-0\" (UID: \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\") " pod="openstack/watcher-applier-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.926886 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3e41e21e-417a-4355-904b-8a2ff7ef19b5-logs\") pod \"watcher-api-0\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " pod="openstack/watcher-api-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.939967 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e41e21e-417a-4355-904b-8a2ff7ef19b5-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " pod="openstack/watcher-api-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.944009 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/524a1cf1-3627-4e2e-ae71-7648ba4462fa-config-data\") pod \"watcher-applier-0\" (UID: \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\") " pod="openstack/watcher-applier-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.945532 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-69b66b77c7-hh8cg"]
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.945743 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3e41e21e-417a-4355-904b-8a2ff7ef19b5-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " pod="openstack/watcher-api-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.951623 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-499ph"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.952743 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/524a1cf1-3627-4e2e-ae71-7648ba4462fa-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\") " pod="openstack/watcher-applier-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.953417 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e41e21e-417a-4355-904b-8a2ff7ef19b5-config-data\") pod \"watcher-api-0\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " pod="openstack/watcher-api-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.953943 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.954140 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-z9crk"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.954278 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.954468 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.960175 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6zvj\" (UniqueName: \"kubernetes.io/projected/524a1cf1-3627-4e2e-ae71-7648ba4462fa-kube-api-access-j6zvj\") pod \"watcher-applier-0\" (UID: \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\") " pod="openstack/watcher-applier-0"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.967984 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8gmxj"
Nov 22 07:42:15 crc kubenswrapper[4929]: I1122 07:42:15.975117 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mmpb\" (UniqueName: \"kubernetes.io/projected/3e41e21e-417a-4355-904b-8a2ff7ef19b5-kube-api-access-6mmpb\") pod \"watcher-api-0\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " pod="openstack/watcher-api-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.015297 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d1bd063-db18-4012-85a9-fc270e6d5782" path="/var/lib/kubelet/pods/3d1bd063-db18-4012-85a9-fc270e6d5782/volumes"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.015854 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-tqf65"]
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.016260 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhwjb\" (UniqueName: \"kubernetes.io/projected/c276fbf8-d0a2-457b-a718-a0bc7347f427-kube-api-access-nhwjb\") pod \"horizon-69b66b77c7-hh8cg\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") " pod="openstack/horizon-69b66b77c7-hh8cg"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.016398 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-578qq\" (UniqueName: \"kubernetes.io/projected/57774149-a18c-475d-ad44-23687ced2981-kube-api-access-578qq\") pod \"watcher-decision-engine-0\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " pod="openstack/watcher-decision-engine-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.016492 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c276fbf8-d0a2-457b-a718-a0bc7347f427-logs\") pod \"horizon-69b66b77c7-hh8cg\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") " pod="openstack/horizon-69b66b77c7-hh8cg"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.016593 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c276fbf8-d0a2-457b-a718-a0bc7347f427-horizon-secret-key\") pod \"horizon-69b66b77c7-hh8cg\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") " pod="openstack/horizon-69b66b77c7-hh8cg"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.016696 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c276fbf8-d0a2-457b-a718-a0bc7347f427-config-data\") pod \"horizon-69b66b77c7-hh8cg\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") " pod="openstack/horizon-69b66b77c7-hh8cg"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.016775 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57774149-a18c-475d-ad44-23687ced2981-config-data\") pod \"watcher-decision-engine-0\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " pod="openstack/watcher-decision-engine-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.016843 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57774149-a18c-475d-ad44-23687ced2981-logs\") pod \"watcher-decision-engine-0\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " pod="openstack/watcher-decision-engine-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.024087 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57774149-a18c-475d-ad44-23687ced2981-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " pod="openstack/watcher-decision-engine-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.024185 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c276fbf8-d0a2-457b-a718-a0bc7347f427-scripts\") pod \"horizon-69b66b77c7-hh8cg\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") " pod="openstack/horizon-69b66b77c7-hh8cg"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.024363 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/57774149-a18c-475d-ad44-23687ced2981-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " pod="openstack/watcher-decision-engine-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.022811 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-tqf65"]
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.017546 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57774149-a18c-475d-ad44-23687ced2981-logs\") pod \"watcher-decision-engine-0\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " pod="openstack/watcher-decision-engine-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.022888 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-tqf65"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.029949 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-gggtz"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.030170 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.030335 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.032929 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57774149-a18c-475d-ad44-23687ced2981-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " pod="openstack/watcher-decision-engine-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.035721 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57774149-a18c-475d-ad44-23687ced2981-config-data\") pod \"watcher-decision-engine-0\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " pod="openstack/watcher-decision-engine-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.049596 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/57774149-a18c-475d-ad44-23687ced2981-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " pod="openstack/watcher-decision-engine-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.073397 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.075456 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.086694 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.186190 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.186569 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.194625 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.211543 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-578qq\" (UniqueName: \"kubernetes.io/projected/57774149-a18c-475d-ad44-23687ced2981-kube-api-access-578qq\") pod \"watcher-decision-engine-0\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " pod="openstack/watcher-decision-engine-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.218871 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c276fbf8-d0a2-457b-a718-a0bc7347f427-logs\") pod \"horizon-69b66b77c7-hh8cg\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") " pod="openstack/horizon-69b66b77c7-hh8cg"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.218925 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.218974 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wj44\" (UniqueName: \"kubernetes.io/projected/59621e9f-5111-473e-b99f-02b09934b5ad-kube-api-access-4wj44\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.219010 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c276fbf8-d0a2-457b-a718-a0bc7347f427-horizon-secret-key\") pod \"horizon-69b66b77c7-hh8cg\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") " pod="openstack/horizon-69b66b77c7-hh8cg"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.219037 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59621e9f-5111-473e-b99f-02b09934b5ad-run-httpd\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.219068 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c276fbf8-d0a2-457b-a718-a0bc7347f427-config-data\") pod \"horizon-69b66b77c7-hh8cg\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") " pod="openstack/horizon-69b66b77c7-hh8cg"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.219102 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-scripts\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.219124 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.219146 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c276fbf8-d0a2-457b-a718-a0bc7347f427-scripts\") pod \"horizon-69b66b77c7-hh8cg\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") " pod="openstack/horizon-69b66b77c7-hh8cg"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.219204 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-config-data\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.219273 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhwjb\" (UniqueName: \"kubernetes.io/projected/c276fbf8-d0a2-457b-a718-a0bc7347f427-kube-api-access-nhwjb\") pod \"horizon-69b66b77c7-hh8cg\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") " pod="openstack/horizon-69b66b77c7-hh8cg"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.219293 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59621e9f-5111-473e-b99f-02b09934b5ad-log-httpd\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.219695 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c276fbf8-d0a2-457b-a718-a0bc7347f427-logs\") pod \"horizon-69b66b77c7-hh8cg\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") " pod="openstack/horizon-69b66b77c7-hh8cg"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.240344 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.241257 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c276fbf8-d0a2-457b-a718-a0bc7347f427-config-data\") pod \"horizon-69b66b77c7-hh8cg\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") " pod="openstack/horizon-69b66b77c7-hh8cg"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.242622 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c276fbf8-d0a2-457b-a718-a0bc7347f427-scripts\") pod \"horizon-69b66b77c7-hh8cg\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") " pod="openstack/horizon-69b66b77c7-hh8cg"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.287071 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c276fbf8-d0a2-457b-a718-a0bc7347f427-horizon-secret-key\") pod \"horizon-69b66b77c7-hh8cg\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") " pod="openstack/horizon-69b66b77c7-hh8cg"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.354418 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-r6lzk"]
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.355670 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-r6lzk"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.356945 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhwjb\" (UniqueName: \"kubernetes.io/projected/c276fbf8-d0a2-457b-a718-a0bc7347f427-kube-api-access-nhwjb\") pod \"horizon-69b66b77c7-hh8cg\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") " pod="openstack/horizon-69b66b77c7-hh8cg"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.363035 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-r6lzk"]
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.370792 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6778fbb94f-xzkz8"]
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.372940 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.378167 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-9cvcv"]
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.379081 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6778fbb94f-xzkz8"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.381411 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-9cvcv"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.394839 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.394920 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wj44\" (UniqueName: \"kubernetes.io/projected/59621e9f-5111-473e-b99f-02b09934b5ad-kube-api-access-4wj44\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.394961 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59621e9f-5111-473e-b99f-02b09934b5ad-run-httpd\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.395002 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8dfm\" (UniqueName: \"kubernetes.io/projected/ebe2ed5d-6475-409a-a3a9-9a47d3de685b-kube-api-access-b8dfm\") pod \"neutron-db-sync-tqf65\" (UID: \"ebe2ed5d-6475-409a-a3a9-9a47d3de685b\") " pod="openstack/neutron-db-sync-tqf65"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.395024 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-scripts\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.395040 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.395091 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebe2ed5d-6475-409a-a3a9-9a47d3de685b-combined-ca-bundle\") pod \"neutron-db-sync-tqf65\" (UID: \"ebe2ed5d-6475-409a-a3a9-9a47d3de685b\") " pod="openstack/neutron-db-sync-tqf65"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.395120 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-config-data\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.395161 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59621e9f-5111-473e-b99f-02b09934b5ad-log-httpd\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.395192 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ebe2ed5d-6475-409a-a3a9-9a47d3de685b-config\") pod \"neutron-db-sync-tqf65\" (UID: \"ebe2ed5d-6475-409a-a3a9-9a47d3de685b\") " pod="openstack/neutron-db-sync-tqf65"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.411486 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.411878 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.412002 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-z6srv"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.412154 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-8sbpl"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.413067 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59621e9f-5111-473e-b99f-02b09934b5ad-run-httpd\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.413694 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-scripts\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.413086 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59621e9f-5111-473e-b99f-02b09934b5ad-log-httpd\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.420185 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-config-data\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.429725 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.430103 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6778fbb94f-xzkz8"]
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.433846 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.454593 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-9cvcv"]
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.479588 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wj44\" (UniqueName: \"kubernetes.io/projected/59621e9f-5111-473e-b99f-02b09934b5ad-kube-api-access-4wj44\") pod \"ceilometer-0\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " pod="openstack/ceilometer-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.496343 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjgpl\" (UniqueName: \"kubernetes.io/projected/7e5572bd-b4bf-4476-9247-06d7c892dcf1-kube-api-access-xjgpl\") pod \"barbican-db-sync-9cvcv\" (UID: \"7e5572bd-b4bf-4476-9247-06d7c892dcf1\") " pod="openstack/barbican-db-sync-9cvcv"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.496402 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-config-data\") pod \"cinder-db-sync-r6lzk\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " pod="openstack/cinder-db-sync-r6lzk"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.496437 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-combined-ca-bundle\") pod \"cinder-db-sync-r6lzk\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " pod="openstack/cinder-db-sync-r6lzk"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.496460 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e5572bd-b4bf-4476-9247-06d7c892dcf1-combined-ca-bundle\") pod \"barbican-db-sync-9cvcv\" (UID: \"7e5572bd-b4bf-4476-9247-06d7c892dcf1\") " pod="openstack/barbican-db-sync-9cvcv"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.496488 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8dfm\" (UniqueName: \"kubernetes.io/projected/ebe2ed5d-6475-409a-a3a9-9a47d3de685b-kube-api-access-b8dfm\") pod \"neutron-db-sync-tqf65\" (UID: \"ebe2ed5d-6475-409a-a3a9-9a47d3de685b\") " pod="openstack/neutron-db-sync-tqf65"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.496523 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dc4mg\" (UniqueName: \"kubernetes.io/projected/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-kube-api-access-dc4mg\") pod \"cinder-db-sync-r6lzk\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " pod="openstack/cinder-db-sync-r6lzk"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.496553 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebe2ed5d-6475-409a-a3a9-9a47d3de685b-combined-ca-bundle\") pod \"neutron-db-sync-tqf65\" (UID: \"ebe2ed5d-6475-409a-a3a9-9a47d3de685b\") " pod="openstack/neutron-db-sync-tqf65"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.496575 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-horizon-secret-key\") pod \"horizon-6778fbb94f-xzkz8\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") " pod="openstack/horizon-6778fbb94f-xzkz8"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.496594 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-scripts\") pod \"cinder-db-sync-r6lzk\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " pod="openstack/cinder-db-sync-r6lzk"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.496620 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-etc-machine-id\") pod \"cinder-db-sync-r6lzk\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " pod="openstack/cinder-db-sync-r6lzk"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.496641 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-config-data\") pod \"horizon-6778fbb94f-xzkz8\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") " pod="openstack/horizon-6778fbb94f-xzkz8"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.496658 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-logs\") pod \"horizon-6778fbb94f-xzkz8\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") " pod="openstack/horizon-6778fbb94f-xzkz8"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.496687 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-scripts\") pod \"horizon-6778fbb94f-xzkz8\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") " pod="openstack/horizon-6778fbb94f-xzkz8"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.496705 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ebe2ed5d-6475-409a-a3a9-9a47d3de685b-config\") pod \"neutron-db-sync-tqf65\" (UID: \"ebe2ed5d-6475-409a-a3a9-9a47d3de685b\") " pod="openstack/neutron-db-sync-tqf65"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.496727 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4kzh\" (UniqueName: \"kubernetes.io/projected/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-kube-api-access-f4kzh\") pod \"horizon-6778fbb94f-xzkz8\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") " pod="openstack/horizon-6778fbb94f-xzkz8"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.496750 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7e5572bd-b4bf-4476-9247-06d7c892dcf1-db-sync-config-data\") pod \"barbican-db-sync-9cvcv\" (UID: \"7e5572bd-b4bf-4476-9247-06d7c892dcf1\") " pod="openstack/barbican-db-sync-9cvcv"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.496800 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-db-sync-config-data\") pod \"cinder-db-sync-r6lzk\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " pod="openstack/cinder-db-sync-r6lzk"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.502682 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.514312 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-499ph"]
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.522038 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebe2ed5d-6475-409a-a3a9-9a47d3de685b-combined-ca-bundle\") pod \"neutron-db-sync-tqf65\" (UID: \"ebe2ed5d-6475-409a-a3a9-9a47d3de685b\") " pod="openstack/neutron-db-sync-tqf65"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.544296 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8dfm\" (UniqueName: \"kubernetes.io/projected/ebe2ed5d-6475-409a-a3a9-9a47d3de685b-kube-api-access-b8dfm\") pod \"neutron-db-sync-tqf65\" (UID: \"ebe2ed5d-6475-409a-a3a9-9a47d3de685b\") " pod="openstack/neutron-db-sync-tqf65"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.544951 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/ebe2ed5d-6475-409a-a3a9-9a47d3de685b-config\") pod \"neutron-db-sync-tqf65\" (UID: \"ebe2ed5d-6475-409a-a3a9-9a47d3de685b\") " pod="openstack/neutron-db-sync-tqf65"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.554281 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-c72g6"]
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.555839 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.565174 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-rsm44"]
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.566414 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-rsm44"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.569413 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.569637 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-jrjkj"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.569746 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.592286 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-69b66b77c7-hh8cg"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.599577 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-c72g6"]
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601455 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnqhm\" (UniqueName: \"kubernetes.io/projected/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-kube-api-access-pnqhm\") pod \"placement-db-sync-rsm44\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " pod="openstack/placement-db-sync-rsm44"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601551 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-db-sync-config-data\") pod \"cinder-db-sync-r6lzk\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " pod="openstack/cinder-db-sync-r6lzk"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601570 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xjgpl\" (UniqueName: \"kubernetes.io/projected/7e5572bd-b4bf-4476-9247-06d7c892dcf1-kube-api-access-xjgpl\") pod \"barbican-db-sync-9cvcv\" (UID: \"7e5572bd-b4bf-4476-9247-06d7c892dcf1\") " pod="openstack/barbican-db-sync-9cvcv"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601594 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-config-data\") pod \"cinder-db-sync-r6lzk\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " pod="openstack/cinder-db-sync-r6lzk"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601617 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-combined-ca-bundle\") pod \"cinder-db-sync-r6lzk\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " pod="openstack/cinder-db-sync-r6lzk"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601640 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e5572bd-b4bf-4476-9247-06d7c892dcf1-combined-ca-bundle\") pod \"barbican-db-sync-9cvcv\" (UID: \"7e5572bd-b4bf-4476-9247-06d7c892dcf1\") " pod="openstack/barbican-db-sync-9cvcv"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601655 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-dns-swift-storage-0\") pod \"dnsmasq-dns-58dd9ff6bc-c72g6\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601678 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-ovsdbserver-sb\") pod \"dnsmasq-dns-58dd9ff6bc-c72g6\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601699 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-ovsdbserver-nb\") pod \"dnsmasq-dns-58dd9ff6bc-c72g6\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601720 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dc4mg\" (UniqueName: \"kubernetes.io/projected/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-kube-api-access-dc4mg\") pod \"cinder-db-sync-r6lzk\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " pod="openstack/cinder-db-sync-r6lzk"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601737 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-logs\") pod \"placement-db-sync-rsm44\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " pod="openstack/placement-db-sync-rsm44"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601753 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-scripts\") pod \"placement-db-sync-rsm44\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " pod="openstack/placement-db-sync-rsm44"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601780 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-horizon-secret-key\") pod \"horizon-6778fbb94f-xzkz8\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") " pod="openstack/horizon-6778fbb94f-xzkz8"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601795 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-scripts\") pod \"cinder-db-sync-r6lzk\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " pod="openstack/cinder-db-sync-r6lzk"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601817 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-config-data\") pod \"placement-db-sync-rsm44\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " pod="openstack/placement-db-sync-rsm44"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601832 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-combined-ca-bundle\") pod \"placement-db-sync-rsm44\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " pod="openstack/placement-db-sync-rsm44"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601854 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-config\") pod \"dnsmasq-dns-58dd9ff6bc-c72g6\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601872 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-etc-machine-id\") pod \"cinder-db-sync-r6lzk\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " pod="openstack/cinder-db-sync-r6lzk"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601899 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-config-data\") pod \"horizon-6778fbb94f-xzkz8\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") " pod="openstack/horizon-6778fbb94f-xzkz8"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601918 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-logs\") pod \"horizon-6778fbb94f-xzkz8\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") " pod="openstack/horizon-6778fbb94f-xzkz8"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601937 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-dns-svc\") pod \"dnsmasq-dns-58dd9ff6bc-c72g6\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601958 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rr75\" (UniqueName: \"kubernetes.io/projected/4b60b952-faed-4624-a268-ec9a01c4271a-kube-api-access-9rr75\") pod \"dnsmasq-dns-58dd9ff6bc-c72g6\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.601985 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-scripts\") pod \"horizon-6778fbb94f-xzkz8\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") " pod="openstack/horizon-6778fbb94f-xzkz8"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.602006 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4kzh\" (UniqueName: \"kubernetes.io/projected/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-kube-api-access-f4kzh\") pod \"horizon-6778fbb94f-xzkz8\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") " pod="openstack/horizon-6778fbb94f-xzkz8"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.602025 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7e5572bd-b4bf-4476-9247-06d7c892dcf1-db-sync-config-data\") pod \"barbican-db-sync-9cvcv\" (UID: \"7e5572bd-b4bf-4476-9247-06d7c892dcf1\") " pod="openstack/barbican-db-sync-9cvcv"
Nov 22 07:42:16 crc kubenswrapper[4929]: I1122
07:42:16.611149 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-logs\") pod \"horizon-6778fbb94f-xzkz8\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") " pod="openstack/horizon-6778fbb94f-xzkz8" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.613858 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-etc-machine-id\") pod \"cinder-db-sync-r6lzk\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " pod="openstack/cinder-db-sync-r6lzk" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.620881 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-scripts\") pod \"horizon-6778fbb94f-xzkz8\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") " pod="openstack/horizon-6778fbb94f-xzkz8" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.621103 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-config-data\") pod \"horizon-6778fbb94f-xzkz8\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") " pod="openstack/horizon-6778fbb94f-xzkz8" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.634277 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-rsm44"] Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.655961 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.677722 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7e5572bd-b4bf-4476-9247-06d7c892dcf1-db-sync-config-data\") pod \"barbican-db-sync-9cvcv\" (UID: \"7e5572bd-b4bf-4476-9247-06d7c892dcf1\") " pod="openstack/barbican-db-sync-9cvcv" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.678776 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-config-data\") pod \"cinder-db-sync-r6lzk\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " pod="openstack/cinder-db-sync-r6lzk" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.679025 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-tqf65" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.684278 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dc4mg\" (UniqueName: \"kubernetes.io/projected/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-kube-api-access-dc4mg\") pod \"cinder-db-sync-r6lzk\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " pod="openstack/cinder-db-sync-r6lzk" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.684947 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-db-sync-config-data\") pod \"cinder-db-sync-r6lzk\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " pod="openstack/cinder-db-sync-r6lzk" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.685298 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-scripts\") pod \"cinder-db-sync-r6lzk\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " pod="openstack/cinder-db-sync-r6lzk" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.685525 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e5572bd-b4bf-4476-9247-06d7c892dcf1-combined-ca-bundle\") pod \"barbican-db-sync-9cvcv\" (UID: \"7e5572bd-b4bf-4476-9247-06d7c892dcf1\") " pod="openstack/barbican-db-sync-9cvcv" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.685565 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjgpl\" (UniqueName: \"kubernetes.io/projected/7e5572bd-b4bf-4476-9247-06d7c892dcf1-kube-api-access-xjgpl\") pod \"barbican-db-sync-9cvcv\" (UID: \"7e5572bd-b4bf-4476-9247-06d7c892dcf1\") " pod="openstack/barbican-db-sync-9cvcv" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.693859 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-combined-ca-bundle\") pod \"cinder-db-sync-r6lzk\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " pod="openstack/cinder-db-sync-r6lzk" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.695424 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-horizon-secret-key\") pod \"horizon-6778fbb94f-xzkz8\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") " pod="openstack/horizon-6778fbb94f-xzkz8" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.699496 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4kzh\" (UniqueName: \"kubernetes.io/projected/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-kube-api-access-f4kzh\") pod \"horizon-6778fbb94f-xzkz8\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") " pod="openstack/horizon-6778fbb94f-xzkz8" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.704484 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnqhm\" (UniqueName: \"kubernetes.io/projected/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-kube-api-access-pnqhm\") pod \"placement-db-sync-rsm44\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " pod="openstack/placement-db-sync-rsm44" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.704566 4929 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-dns-swift-storage-0\") pod \"dnsmasq-dns-58dd9ff6bc-c72g6\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.704598 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-ovsdbserver-sb\") pod \"dnsmasq-dns-58dd9ff6bc-c72g6\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.704623 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-ovsdbserver-nb\") pod \"dnsmasq-dns-58dd9ff6bc-c72g6\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.704698 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-logs\") pod \"placement-db-sync-rsm44\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " pod="openstack/placement-db-sync-rsm44" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.704719 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-scripts\") pod \"placement-db-sync-rsm44\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " pod="openstack/placement-db-sync-rsm44" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.704797 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-config-data\") pod \"placement-db-sync-rsm44\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " pod="openstack/placement-db-sync-rsm44" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.704858 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-combined-ca-bundle\") pod \"placement-db-sync-rsm44\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " pod="openstack/placement-db-sync-rsm44" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.704885 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-config\") pod \"dnsmasq-dns-58dd9ff6bc-c72g6\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.704951 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-dns-svc\") pod \"dnsmasq-dns-58dd9ff6bc-c72g6\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.705011 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rr75\" (UniqueName: 
\"kubernetes.io/projected/4b60b952-faed-4624-a268-ec9a01c4271a-kube-api-access-9rr75\") pod \"dnsmasq-dns-58dd9ff6bc-c72g6\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.706532 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-config\") pod \"dnsmasq-dns-58dd9ff6bc-c72g6\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.707291 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-dns-svc\") pod \"dnsmasq-dns-58dd9ff6bc-c72g6\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.707793 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-ovsdbserver-sb\") pod \"dnsmasq-dns-58dd9ff6bc-c72g6\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.707853 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-scripts\") pod \"placement-db-sync-rsm44\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " pod="openstack/placement-db-sync-rsm44" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.708198 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-logs\") pod \"placement-db-sync-rsm44\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " pod="openstack/placement-db-sync-rsm44" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.708294 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-combined-ca-bundle\") pod \"placement-db-sync-rsm44\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " pod="openstack/placement-db-sync-rsm44" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.708232 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-dns-swift-storage-0\") pod \"dnsmasq-dns-58dd9ff6bc-c72g6\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.708553 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-ovsdbserver-nb\") pod \"dnsmasq-dns-58dd9ff6bc-c72g6\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.726483 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-config-data\") pod \"placement-db-sync-rsm44\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " pod="openstack/placement-db-sync-rsm44" Nov 22 
07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.737467 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnqhm\" (UniqueName: \"kubernetes.io/projected/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-kube-api-access-pnqhm\") pod \"placement-db-sync-rsm44\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " pod="openstack/placement-db-sync-rsm44" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.761705 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6778fbb94f-xzkz8" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.761733 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rr75\" (UniqueName: \"kubernetes.io/projected/4b60b952-faed-4624-a268-ec9a01c4271a-kube-api-access-9rr75\") pod \"dnsmasq-dns-58dd9ff6bc-c72g6\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.800196 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-9cvcv" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.914566 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-499ph"] Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.930715 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.986494 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-r6lzk" Nov 22 07:42:16 crc kubenswrapper[4929]: I1122 07:42:16.994962 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-rsm44" Nov 22 07:42:17 crc kubenswrapper[4929]: I1122 07:42:17.079542 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8gmxj"] Nov 22 07:42:17 crc kubenswrapper[4929]: W1122 07:42:17.191692 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18461154_ba2d_496b_a4a8_0f14f91b64af.slice/crio-3702040d46290d6889454808091d728c6371c498adee889aecc38ec619ce4f2c WatchSource:0}: Error finding container 3702040d46290d6889454808091d728c6371c498adee889aecc38ec619ce4f2c: Status 404 returned error can't find the container with id 3702040d46290d6889454808091d728c6371c498adee889aecc38ec619ce4f2c Nov 22 07:42:17 crc kubenswrapper[4929]: I1122 07:42:17.281322 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 22 07:42:17 crc kubenswrapper[4929]: I1122 07:42:17.286374 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Nov 22 07:42:17 crc kubenswrapper[4929]: W1122 07:42:17.312202 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e41e21e_417a_4355_904b_8a2ff7ef19b5.slice/crio-fd6c125d0dfcab48660df7220d09b94166c37581282d1bc0b374b819db63aa9d WatchSource:0}: Error finding container fd6c125d0dfcab48660df7220d09b94166c37581282d1bc0b374b819db63aa9d: Status 404 returned error can't find the container with id fd6c125d0dfcab48660df7220d09b94166c37581282d1bc0b374b819db63aa9d Nov 22 07:42:17 crc kubenswrapper[4929]: W1122 07:42:17.353227 4929 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod524a1cf1_3627_4e2e_ae71_7648ba4462fa.slice/crio-f3393fe110150a3c12059e5d2f96d34e8ab430540f60837a55d208a53b8b2b91 WatchSource:0}: Error finding container f3393fe110150a3c12059e5d2f96d34e8ab430540f60837a55d208a53b8b2b91: Status 404 returned error can't find the container with id f3393fe110150a3c12059e5d2f96d34e8ab430540f60837a55d208a53b8b2b91 Nov 22 07:42:17 crc kubenswrapper[4929]: I1122 07:42:17.443747 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-69b66b77c7-hh8cg"] Nov 22 07:42:17 crc kubenswrapper[4929]: W1122 07:42:17.455519 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc276fbf8_d0a2_457b_a718_a0bc7347f427.slice/crio-3f6c9416ff0cb2e9103551fe7d5cd1aee10120f472ba86ccbbf30969604b1738 WatchSource:0}: Error finding container 3f6c9416ff0cb2e9103551fe7d5cd1aee10120f472ba86ccbbf30969604b1738: Status 404 returned error can't find the container with id 3f6c9416ff0cb2e9103551fe7d5cd1aee10120f472ba86ccbbf30969604b1738 Nov 22 07:42:17 crc kubenswrapper[4929]: I1122 07:42:17.502504 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 22 07:42:17 crc kubenswrapper[4929]: I1122 07:42:17.505867 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-69b66b77c7-hh8cg" event={"ID":"c276fbf8-d0a2-457b-a718-a0bc7347f427","Type":"ContainerStarted","Data":"3f6c9416ff0cb2e9103551fe7d5cd1aee10120f472ba86ccbbf30969604b1738"} Nov 22 07:42:17 crc kubenswrapper[4929]: I1122 07:42:17.540098 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"524a1cf1-3627-4e2e-ae71-7648ba4462fa","Type":"ContainerStarted","Data":"f3393fe110150a3c12059e5d2f96d34e8ab430540f60837a55d208a53b8b2b91"} Nov 22 07:42:17 crc kubenswrapper[4929]: I1122 07:42:17.542466 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"3e41e21e-417a-4355-904b-8a2ff7ef19b5","Type":"ContainerStarted","Data":"fd6c125d0dfcab48660df7220d09b94166c37581282d1bc0b374b819db63aa9d"} Nov 22 07:42:17 crc kubenswrapper[4929]: I1122 07:42:17.543657 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5959f8865f-499ph" event={"ID":"e25ee40c-5b15-4aff-9007-efc343313d20","Type":"ContainerStarted","Data":"a22359b1de196b57a08b0c8a43a32e61ff9c4d6e3974853cadda949155ed5e2a"} Nov 22 07:42:17 crc kubenswrapper[4929]: I1122 07:42:17.546305 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8gmxj" event={"ID":"18461154-ba2d-496b-a4a8-0f14f91b64af","Type":"ContainerStarted","Data":"3702040d46290d6889454808091d728c6371c498adee889aecc38ec619ce4f2c"} Nov 22 07:42:17 crc kubenswrapper[4929]: I1122 07:42:17.549049 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:42:17 crc kubenswrapper[4929]: I1122 07:42:17.568057 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-tqf65"] Nov 22 07:42:17 crc kubenswrapper[4929]: W1122 07:42:17.588865 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod59621e9f_5111_473e_b99f_02b09934b5ad.slice/crio-36f331230e4d83355996655a111c2a52736a75ab217475b4fbae05e9c8c70e3a WatchSource:0}: Error finding container 36f331230e4d83355996655a111c2a52736a75ab217475b4fbae05e9c8c70e3a: Status 404 
returned error can't find the container with id 36f331230e4d83355996655a111c2a52736a75ab217475b4fbae05e9c8c70e3a Nov 22 07:42:17 crc kubenswrapper[4929]: W1122 07:42:17.595445 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podebe2ed5d_6475_409a_a3a9_9a47d3de685b.slice/crio-6dd7dc6aaa9a12206d4536a3dfb2753b9befbc417a7ff5292368d0f50c44e90f WatchSource:0}: Error finding container 6dd7dc6aaa9a12206d4536a3dfb2753b9befbc417a7ff5292368d0f50c44e90f: Status 404 returned error can't find the container with id 6dd7dc6aaa9a12206d4536a3dfb2753b9befbc417a7ff5292368d0f50c44e90f Nov 22 07:42:17 crc kubenswrapper[4929]: I1122 07:42:17.614420 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-9cvcv"] Nov 22 07:42:17 crc kubenswrapper[4929]: I1122 07:42:17.697809 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6778fbb94f-xzkz8"] Nov 22 07:42:17 crc kubenswrapper[4929]: I1122 07:42:17.874731 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-r6lzk"] Nov 22 07:42:17 crc kubenswrapper[4929]: W1122 07:42:17.898139 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c98cd8c_8d42_4c8e_9ff9_84de2dafb18c.slice/crio-060e83b3ff859ca1d132708edb3afd5bf4c90835c78638b1cef721d71983e564 WatchSource:0}: Error finding container 060e83b3ff859ca1d132708edb3afd5bf4c90835c78638b1cef721d71983e564: Status 404 returned error can't find the container with id 060e83b3ff859ca1d132708edb3afd5bf4c90835c78638b1cef721d71983e564 Nov 22 07:42:17 crc kubenswrapper[4929]: I1122 07:42:17.925656 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-c72g6"] Nov 22 07:42:18 crc kubenswrapper[4929]: I1122 07:42:18.020946 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-rsm44"] Nov 22 07:42:18 crc kubenswrapper[4929]: I1122 07:42:18.554657 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-tqf65" event={"ID":"ebe2ed5d-6475-409a-a3a9-9a47d3de685b","Type":"ContainerStarted","Data":"6dd7dc6aaa9a12206d4536a3dfb2753b9befbc417a7ff5292368d0f50c44e90f"} Nov 22 07:42:18 crc kubenswrapper[4929]: I1122 07:42:18.556125 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-r6lzk" event={"ID":"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c","Type":"ContainerStarted","Data":"060e83b3ff859ca1d132708edb3afd5bf4c90835c78638b1cef721d71983e564"} Nov 22 07:42:18 crc kubenswrapper[4929]: I1122 07:42:18.557586 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6778fbb94f-xzkz8" event={"ID":"e571c2d8-a9a3-4a03-b95f-35027b6d90fc","Type":"ContainerStarted","Data":"a652622b7045f2f020e49efdf8919bd65c0b606a131a5a634550b9fe0eef6b88"} Nov 22 07:42:18 crc kubenswrapper[4929]: I1122 07:42:18.558888 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-9cvcv" event={"ID":"7e5572bd-b4bf-4476-9247-06d7c892dcf1","Type":"ContainerStarted","Data":"4a4cf227f45b5362beabfc9cd08fc878d00eeb7c24e292f81338ef4b54f02562"} Nov 22 07:42:18 crc kubenswrapper[4929]: I1122 07:42:18.560493 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-rsm44" event={"ID":"155c1bdd-4b26-4059-8ab7-7a6299bc17c9","Type":"ContainerStarted","Data":"b52e461ef02e8d1eabf60a06c855c2412aeda87e02d6e80b3c819f5fc68f351b"} Nov 22 07:42:18 
crc kubenswrapper[4929]: I1122 07:42:18.561436 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"57774149-a18c-475d-ad44-23687ced2981","Type":"ContainerStarted","Data":"1b40cb180d5c57abdfbd8cfbea1debedca0e4975c4e43d26cf5a212170d3fba4"} Nov 22 07:42:18 crc kubenswrapper[4929]: I1122 07:42:18.562763 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59621e9f-5111-473e-b99f-02b09934b5ad","Type":"ContainerStarted","Data":"36f331230e4d83355996655a111c2a52736a75ab217475b4fbae05e9c8c70e3a"} Nov 22 07:42:18 crc kubenswrapper[4929]: I1122 07:42:18.564006 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" event={"ID":"4b60b952-faed-4624-a268-ec9a01c4271a","Type":"ContainerStarted","Data":"d581e57b0890f59e85a0fa869b7ef988ca25fe313e4fd2daa7e93f2c351c7c71"} Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.471823 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.498022 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-69b66b77c7-hh8cg"] Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.518792 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.545429 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-8449665697-bfcjw"] Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.547107 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-8449665697-bfcjw" Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.554348 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-8449665697-bfcjw"] Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.572296 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5959f8865f-499ph" event={"ID":"e25ee40c-5b15-4aff-9007-efc343313d20","Type":"ContainerStarted","Data":"e7be256efc37a412befcc6ef257bbca19ab8b36c6194372bd31e4c5dc33ed903"} Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.573955 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8gmxj" event={"ID":"18461154-ba2d-496b-a4a8-0f14f91b64af","Type":"ContainerStarted","Data":"e9733877fa4257bd13e7a09e1ea4585937dcd795e20c87d558346307cd476837"} Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.603991 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/520c3db2-3804-4bb6-a2b8-b5e51389f66d-horizon-secret-key\") pod \"horizon-8449665697-bfcjw\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") " pod="openstack/horizon-8449665697-bfcjw" Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.604046 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/520c3db2-3804-4bb6-a2b8-b5e51389f66d-scripts\") pod \"horizon-8449665697-bfcjw\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") " pod="openstack/horizon-8449665697-bfcjw" Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.604077 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/520c3db2-3804-4bb6-a2b8-b5e51389f66d-config-data\") pod 
\"horizon-8449665697-bfcjw\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") " pod="openstack/horizon-8449665697-bfcjw" Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.604155 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/520c3db2-3804-4bb6-a2b8-b5e51389f66d-logs\") pod \"horizon-8449665697-bfcjw\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") " pod="openstack/horizon-8449665697-bfcjw" Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.604234 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbf96\" (UniqueName: \"kubernetes.io/projected/520c3db2-3804-4bb6-a2b8-b5e51389f66d-kube-api-access-zbf96\") pod \"horizon-8449665697-bfcjw\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") " pod="openstack/horizon-8449665697-bfcjw" Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.705365 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/520c3db2-3804-4bb6-a2b8-b5e51389f66d-horizon-secret-key\") pod \"horizon-8449665697-bfcjw\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") " pod="openstack/horizon-8449665697-bfcjw" Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.705413 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/520c3db2-3804-4bb6-a2b8-b5e51389f66d-scripts\") pod \"horizon-8449665697-bfcjw\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") " pod="openstack/horizon-8449665697-bfcjw" Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.705439 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/520c3db2-3804-4bb6-a2b8-b5e51389f66d-config-data\") pod \"horizon-8449665697-bfcjw\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") " pod="openstack/horizon-8449665697-bfcjw" Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.705489 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/520c3db2-3804-4bb6-a2b8-b5e51389f66d-logs\") pod \"horizon-8449665697-bfcjw\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") " pod="openstack/horizon-8449665697-bfcjw" Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.705544 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbf96\" (UniqueName: \"kubernetes.io/projected/520c3db2-3804-4bb6-a2b8-b5e51389f66d-kube-api-access-zbf96\") pod \"horizon-8449665697-bfcjw\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") " pod="openstack/horizon-8449665697-bfcjw" Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.706079 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/520c3db2-3804-4bb6-a2b8-b5e51389f66d-logs\") pod \"horizon-8449665697-bfcjw\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") " pod="openstack/horizon-8449665697-bfcjw" Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.706769 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/520c3db2-3804-4bb6-a2b8-b5e51389f66d-scripts\") pod \"horizon-8449665697-bfcjw\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") " pod="openstack/horizon-8449665697-bfcjw" Nov 22 07:42:19 crc kubenswrapper[4929]: 
I1122 07:42:19.706859 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/520c3db2-3804-4bb6-a2b8-b5e51389f66d-config-data\") pod \"horizon-8449665697-bfcjw\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") " pod="openstack/horizon-8449665697-bfcjw" Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.715646 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/520c3db2-3804-4bb6-a2b8-b5e51389f66d-horizon-secret-key\") pod \"horizon-8449665697-bfcjw\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") " pod="openstack/horizon-8449665697-bfcjw" Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.721502 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbf96\" (UniqueName: \"kubernetes.io/projected/520c3db2-3804-4bb6-a2b8-b5e51389f66d-kube-api-access-zbf96\") pod \"horizon-8449665697-bfcjw\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") " pod="openstack/horizon-8449665697-bfcjw" Nov 22 07:42:19 crc kubenswrapper[4929]: I1122 07:42:19.862914 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-8449665697-bfcjw" Nov 22 07:42:20 crc kubenswrapper[4929]: I1122 07:42:20.332443 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-8449665697-bfcjw"] Nov 22 07:42:20 crc kubenswrapper[4929]: I1122 07:42:20.585823 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-tqf65" event={"ID":"ebe2ed5d-6475-409a-a3a9-9a47d3de685b","Type":"ContainerStarted","Data":"e1a366137201b6689cd0e75aadbbf83f57a48e308149db83354111c219e52e1d"} Nov 22 07:42:20 crc kubenswrapper[4929]: I1122 07:42:20.587529 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"3e41e21e-417a-4355-904b-8a2ff7ef19b5","Type":"ContainerStarted","Data":"e873dfe0e69e5f5e35ead7b8396e99d16dcd9d8b859bf1a0f29777a3e66257e2"} Nov 22 07:42:20 crc kubenswrapper[4929]: I1122 07:42:20.588819 4929 generic.go:334] "Generic (PLEG): container finished" podID="e25ee40c-5b15-4aff-9007-efc343313d20" containerID="e7be256efc37a412befcc6ef257bbca19ab8b36c6194372bd31e4c5dc33ed903" exitCode=0 Nov 22 07:42:20 crc kubenswrapper[4929]: I1122 07:42:20.588880 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5959f8865f-499ph" event={"ID":"e25ee40c-5b15-4aff-9007-efc343313d20","Type":"ContainerDied","Data":"e7be256efc37a412befcc6ef257bbca19ab8b36c6194372bd31e4c5dc33ed903"} Nov 22 07:42:20 crc kubenswrapper[4929]: I1122 07:42:20.590112 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8449665697-bfcjw" event={"ID":"520c3db2-3804-4bb6-a2b8-b5e51389f66d","Type":"ContainerStarted","Data":"cb757a06062ec7762bc7a1f93309802298f8218c37cde1eb242a9a9c8d7d6774"} Nov 22 07:42:20 crc kubenswrapper[4929]: I1122 07:42:20.592951 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" event={"ID":"4b60b952-faed-4624-a268-ec9a01c4271a","Type":"ContainerStarted","Data":"d53ca4c874c22adecb43c54da8fbd50500cf34316a69df4871a1e68d39498a6a"} Nov 22 07:42:20 crc kubenswrapper[4929]: I1122 07:42:20.640345 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-8gmxj" podStartSLOduration=5.6403224309999995 podStartE2EDuration="5.640322431s" podCreationTimestamp="2025-11-22 07:42:15 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:42:20.633649524 +0000 UTC m=+1877.743103537" watchObservedRunningTime="2025-11-22 07:42:20.640322431 +0000 UTC m=+1877.749776444" Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.156166 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-499ph" Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.233364 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-config\") pod \"e25ee40c-5b15-4aff-9007-efc343313d20\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.234011 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-dns-swift-storage-0\") pod \"e25ee40c-5b15-4aff-9007-efc343313d20\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.234314 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-dns-svc\") pod \"e25ee40c-5b15-4aff-9007-efc343313d20\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.234439 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9rwbc\" (UniqueName: \"kubernetes.io/projected/e25ee40c-5b15-4aff-9007-efc343313d20-kube-api-access-9rwbc\") pod \"e25ee40c-5b15-4aff-9007-efc343313d20\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.234541 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-ovsdbserver-sb\") pod \"e25ee40c-5b15-4aff-9007-efc343313d20\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.234648 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-ovsdbserver-nb\") pod \"e25ee40c-5b15-4aff-9007-efc343313d20\" (UID: \"e25ee40c-5b15-4aff-9007-efc343313d20\") " Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.257266 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e25ee40c-5b15-4aff-9007-efc343313d20-kube-api-access-9rwbc" (OuterVolumeSpecName: "kube-api-access-9rwbc") pod "e25ee40c-5b15-4aff-9007-efc343313d20" (UID: "e25ee40c-5b15-4aff-9007-efc343313d20"). InnerVolumeSpecName "kube-api-access-9rwbc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.262528 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e25ee40c-5b15-4aff-9007-efc343313d20" (UID: "e25ee40c-5b15-4aff-9007-efc343313d20"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.266798 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e25ee40c-5b15-4aff-9007-efc343313d20" (UID: "e25ee40c-5b15-4aff-9007-efc343313d20"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.296130 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e25ee40c-5b15-4aff-9007-efc343313d20" (UID: "e25ee40c-5b15-4aff-9007-efc343313d20"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.307081 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e25ee40c-5b15-4aff-9007-efc343313d20" (UID: "e25ee40c-5b15-4aff-9007-efc343313d20"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.338144 4929 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.338443 4929 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.338454 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9rwbc\" (UniqueName: \"kubernetes.io/projected/e25ee40c-5b15-4aff-9007-efc343313d20-kube-api-access-9rwbc\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.338464 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.338473 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.371838 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-config" (OuterVolumeSpecName: "config") pod "e25ee40c-5b15-4aff-9007-efc343313d20" (UID: "e25ee40c-5b15-4aff-9007-efc343313d20"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.440361 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e25ee40c-5b15-4aff-9007-efc343313d20-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.631937 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5959f8865f-499ph" event={"ID":"e25ee40c-5b15-4aff-9007-efc343313d20","Type":"ContainerDied","Data":"a22359b1de196b57a08b0c8a43a32e61ff9c4d6e3974853cadda949155ed5e2a"} Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.632002 4929 scope.go:117] "RemoveContainer" containerID="e7be256efc37a412befcc6ef257bbca19ab8b36c6194372bd31e4c5dc33ed903" Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.632437 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5959f8865f-499ph" Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.636338 4929 generic.go:334] "Generic (PLEG): container finished" podID="4b60b952-faed-4624-a268-ec9a01c4271a" containerID="d53ca4c874c22adecb43c54da8fbd50500cf34316a69df4871a1e68d39498a6a" exitCode=0 Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.636745 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" event={"ID":"4b60b952-faed-4624-a268-ec9a01c4271a","Type":"ContainerDied","Data":"d53ca4c874c22adecb43c54da8fbd50500cf34316a69df4871a1e68d39498a6a"} Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.655277 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-tqf65" podStartSLOduration=6.655261675 podStartE2EDuration="6.655261675s" podCreationTimestamp="2025-11-22 07:42:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:42:21.65386376 +0000 UTC m=+1878.763317783" watchObservedRunningTime="2025-11-22 07:42:21.655261675 +0000 UTC m=+1878.764715688" Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.737502 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-499ph"] Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.746925 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5959f8865f-499ph"] Nov 22 07:42:21 crc kubenswrapper[4929]: I1122 07:42:21.958568 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e25ee40c-5b15-4aff-9007-efc343313d20" path="/var/lib/kubelet/pods/e25ee40c-5b15-4aff-9007-efc343313d20/volumes" Nov 22 07:42:22 crc kubenswrapper[4929]: I1122 07:42:22.644604 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"3e41e21e-417a-4355-904b-8a2ff7ef19b5","Type":"ContainerStarted","Data":"9de14adf6155c62e3fc9638bd060dabb48765c4ecea551f6021ecbc1bec06396"} Nov 22 07:42:23 crc kubenswrapper[4929]: I1122 07:42:23.368131 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zv95t" Nov 22 07:42:23 crc kubenswrapper[4929]: I1122 07:42:23.465271 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zv95t" Nov 22 07:42:24 crc kubenswrapper[4929]: I1122 07:42:24.621909 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zv95t"] Nov 22 
07:42:24 crc kubenswrapper[4929]: I1122 07:42:24.664659 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" event={"ID":"4b60b952-faed-4624-a268-ec9a01c4271a","Type":"ContainerStarted","Data":"26060ba06b38077d09eebc10477fa54dfe2f7470d78e65bfe8cf247a0fae7d90"} Nov 22 07:42:24 crc kubenswrapper[4929]: I1122 07:42:24.664814 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" containerName="watcher-api-log" containerID="cri-o://e873dfe0e69e5f5e35ead7b8396e99d16dcd9d8b859bf1a0f29777a3e66257e2" gracePeriod=30 Nov 22 07:42:24 crc kubenswrapper[4929]: I1122 07:42:24.664841 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" containerName="watcher-api" containerID="cri-o://9de14adf6155c62e3fc9638bd060dabb48765c4ecea551f6021ecbc1bec06396" gracePeriod=30 Nov 22 07:42:24 crc kubenswrapper[4929]: I1122 07:42:24.664962 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zv95t" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" containerID="cri-o://6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe" gracePeriod=2 Nov 22 07:42:24 crc kubenswrapper[4929]: I1122 07:42:24.665278 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 22 07:42:24 crc kubenswrapper[4929]: I1122 07:42:24.686620 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.150:9322/\": EOF" Nov 22 07:42:24 crc kubenswrapper[4929]: I1122 07:42:24.707488 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=9.707468702 podStartE2EDuration="9.707468702s" podCreationTimestamp="2025-11-22 07:42:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:42:24.69460675 +0000 UTC m=+1881.804060783" watchObservedRunningTime="2025-11-22 07:42:24.707468702 +0000 UTC m=+1881.816922715" Nov 22 07:42:24 crc kubenswrapper[4929]: I1122 07:42:24.725407 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" podStartSLOduration=8.72538416 podStartE2EDuration="8.72538416s" podCreationTimestamp="2025-11-22 07:42:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:42:24.721115273 +0000 UTC m=+1881.830569286" watchObservedRunningTime="2025-11-22 07:42:24.72538416 +0000 UTC m=+1881.834838173" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.405862 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6778fbb94f-xzkz8"] Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.432975 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-598887bdc4-mxhdk"] Nov 22 07:42:25 crc kubenswrapper[4929]: E1122 07:42:25.433486 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e25ee40c-5b15-4aff-9007-efc343313d20" containerName="init" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.433503 4929 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="e25ee40c-5b15-4aff-9007-efc343313d20" containerName="init" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.433715 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="e25ee40c-5b15-4aff-9007-efc343313d20" containerName="init" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.434802 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.437717 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.462713 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-598887bdc4-mxhdk"] Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.499967 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-8449665697-bfcjw"] Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.513406 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-58894b567d-khmvq"] Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.515572 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.524886 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89f5e117-d76e-446c-a83a-09737f044c1f-logs\") pod \"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.524932 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5q55f\" (UniqueName: \"kubernetes.io/projected/89f5e117-d76e-446c-a83a-09737f044c1f-kube-api-access-5q55f\") pod \"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.524978 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/89f5e117-d76e-446c-a83a-09737f044c1f-config-data\") pod \"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.525000 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/89f5e117-d76e-446c-a83a-09737f044c1f-horizon-tls-certs\") pod \"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.525042 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/89f5e117-d76e-446c-a83a-09737f044c1f-scripts\") pod \"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.525060 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89f5e117-d76e-446c-a83a-09737f044c1f-combined-ca-bundle\") pod 
\"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.525077 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/89f5e117-d76e-446c-a83a-09737f044c1f-horizon-secret-key\") pod \"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.533262 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-58894b567d-khmvq"] Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.626245 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8f14a1c-f6bf-4a66-b839-393c7c34b932-logs\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.626323 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89f5e117-d76e-446c-a83a-09737f044c1f-logs\") pod \"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.626350 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5q55f\" (UniqueName: \"kubernetes.io/projected/89f5e117-d76e-446c-a83a-09737f044c1f-kube-api-access-5q55f\") pod \"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.626372 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctcf6\" (UniqueName: \"kubernetes.io/projected/d8f14a1c-f6bf-4a66-b839-393c7c34b932-kube-api-access-ctcf6\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.626402 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d8f14a1c-f6bf-4a66-b839-393c7c34b932-horizon-secret-key\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.626432 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/89f5e117-d76e-446c-a83a-09737f044c1f-config-data\") pod \"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.626457 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/89f5e117-d76e-446c-a83a-09737f044c1f-horizon-tls-certs\") pod \"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.626471 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8f14a1c-f6bf-4a66-b839-393c7c34b932-horizon-tls-certs\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.626488 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d8f14a1c-f6bf-4a66-b839-393c7c34b932-scripts\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.626529 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/89f5e117-d76e-446c-a83a-09737f044c1f-scripts\") pod \"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.626545 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89f5e117-d76e-446c-a83a-09737f044c1f-combined-ca-bundle\") pod \"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.626560 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/89f5e117-d76e-446c-a83a-09737f044c1f-horizon-secret-key\") pod \"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.626588 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d8f14a1c-f6bf-4a66-b839-393c7c34b932-config-data\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.626624 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8f14a1c-f6bf-4a66-b839-393c7c34b932-combined-ca-bundle\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.627442 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89f5e117-d76e-446c-a83a-09737f044c1f-logs\") pod \"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.628699 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/89f5e117-d76e-446c-a83a-09737f044c1f-config-data\") pod \"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.630027 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/89f5e117-d76e-446c-a83a-09737f044c1f-scripts\") pod 
\"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.646755 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/89f5e117-d76e-446c-a83a-09737f044c1f-horizon-secret-key\") pod \"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.648648 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/89f5e117-d76e-446c-a83a-09737f044c1f-horizon-tls-certs\") pod \"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.648824 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89f5e117-d76e-446c-a83a-09737f044c1f-combined-ca-bundle\") pod \"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.652167 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5q55f\" (UniqueName: \"kubernetes.io/projected/89f5e117-d76e-446c-a83a-09737f044c1f-kube-api-access-5q55f\") pod \"horizon-598887bdc4-mxhdk\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.727493 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctcf6\" (UniqueName: \"kubernetes.io/projected/d8f14a1c-f6bf-4a66-b839-393c7c34b932-kube-api-access-ctcf6\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.727541 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d8f14a1c-f6bf-4a66-b839-393c7c34b932-horizon-secret-key\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.727581 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8f14a1c-f6bf-4a66-b839-393c7c34b932-horizon-tls-certs\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.727599 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d8f14a1c-f6bf-4a66-b839-393c7c34b932-scripts\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.727650 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d8f14a1c-f6bf-4a66-b839-393c7c34b932-config-data\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 
07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.727682 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8f14a1c-f6bf-4a66-b839-393c7c34b932-combined-ca-bundle\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.727717 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8f14a1c-f6bf-4a66-b839-393c7c34b932-logs\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.728072 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8f14a1c-f6bf-4a66-b839-393c7c34b932-logs\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.730101 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d8f14a1c-f6bf-4a66-b839-393c7c34b932-scripts\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.732052 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d8f14a1c-f6bf-4a66-b839-393c7c34b932-config-data\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.739687 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d8f14a1c-f6bf-4a66-b839-393c7c34b932-horizon-secret-key\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.746512 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8f14a1c-f6bf-4a66-b839-393c7c34b932-combined-ca-bundle\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.747436 4929 generic.go:334] "Generic (PLEG): container finished" podID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" containerID="e873dfe0e69e5f5e35ead7b8396e99d16dcd9d8b859bf1a0f29777a3e66257e2" exitCode=143 Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.748500 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"3e41e21e-417a-4355-904b-8a2ff7ef19b5","Type":"ContainerDied","Data":"e873dfe0e69e5f5e35ead7b8396e99d16dcd9d8b859bf1a0f29777a3e66257e2"} Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.748530 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.756941 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/d8f14a1c-f6bf-4a66-b839-393c7c34b932-horizon-tls-certs\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.765797 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctcf6\" (UniqueName: \"kubernetes.io/projected/d8f14a1c-f6bf-4a66-b839-393c7c34b932-kube-api-access-ctcf6\") pod \"horizon-58894b567d-khmvq\" (UID: \"d8f14a1c-f6bf-4a66-b839-393c7c34b932\") " pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.767238 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.840767 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-58894b567d-khmvq" Nov 22 07:42:25 crc kubenswrapper[4929]: I1122 07:42:25.948442 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" Nov 22 07:42:25 crc kubenswrapper[4929]: E1122 07:42:25.948673 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:42:26 crc kubenswrapper[4929]: I1122 07:42:26.087492 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 22 07:42:26 crc kubenswrapper[4929]: I1122 07:42:26.472271 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-58894b567d-khmvq"] Nov 22 07:42:26 crc kubenswrapper[4929]: I1122 07:42:26.495672 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-598887bdc4-mxhdk"] Nov 22 07:42:26 crc kubenswrapper[4929]: I1122 07:42:26.866645 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zv95t_2584eaad-5c38-40d2-b1da-7a6268080fd0/registry-server/0.log" Nov 22 07:42:26 crc kubenswrapper[4929]: I1122 07:42:26.868732 4929 generic.go:334] "Generic (PLEG): container finished" podID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerID="6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe" exitCode=0 Nov 22 07:42:26 crc kubenswrapper[4929]: I1122 07:42:26.868824 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zv95t" event={"ID":"2584eaad-5c38-40d2-b1da-7a6268080fd0","Type":"ContainerDied","Data":"6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe"} Nov 22 07:42:26 crc kubenswrapper[4929]: I1122 07:42:26.868904 4929 scope.go:117] "RemoveContainer" containerID="8433b6e1feb3faea1eee8a9fc8a5b45358f3fb339c0bbfa06155fbe031127099" Nov 22 07:42:31 crc kubenswrapper[4929]: I1122 07:42:31.117542 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.150:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 07:42:31 crc kubenswrapper[4929]: I1122 07:42:31.118701 4929 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openstack/watcher-api-0" podUID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.150:9322/\": dial tcp 10.217.0.150:9322: connect: connection refused" Nov 22 07:42:31 crc kubenswrapper[4929]: I1122 07:42:31.932337 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" Nov 22 07:42:32 crc kubenswrapper[4929]: I1122 07:42:32.040683 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-n4sjl"] Nov 22 07:42:32 crc kubenswrapper[4929]: I1122 07:42:32.040928 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" podUID="af6c6154-1378-401d-b568-cf37274933fb" containerName="dnsmasq-dns" containerID="cri-o://4b666bf0a3163d13f9cc34e907110d44f4185d201360a35ceb73de743f89276b" gracePeriod=10 Nov 22 07:42:32 crc kubenswrapper[4929]: I1122 07:42:32.942707 4929 generic.go:334] "Generic (PLEG): container finished" podID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" containerID="9de14adf6155c62e3fc9638bd060dabb48765c4ecea551f6021ecbc1bec06396" exitCode=0 Nov 22 07:42:32 crc kubenswrapper[4929]: I1122 07:42:32.942790 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"3e41e21e-417a-4355-904b-8a2ff7ef19b5","Type":"ContainerDied","Data":"9de14adf6155c62e3fc9638bd060dabb48765c4ecea551f6021ecbc1bec06396"} Nov 22 07:42:32 crc kubenswrapper[4929]: I1122 07:42:32.950513 4929 generic.go:334] "Generic (PLEG): container finished" podID="af6c6154-1378-401d-b568-cf37274933fb" containerID="4b666bf0a3163d13f9cc34e907110d44f4185d201360a35ceb73de743f89276b" exitCode=0 Nov 22 07:42:32 crc kubenswrapper[4929]: I1122 07:42:32.950947 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" event={"ID":"af6c6154-1378-401d-b568-cf37274933fb","Type":"ContainerDied","Data":"4b666bf0a3163d13f9cc34e907110d44f4185d201360a35ceb73de743f89276b"} Nov 22 07:42:33 crc kubenswrapper[4929]: E1122 07:42:33.263290 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe is running failed: container process not found" containerID="6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 07:42:33 crc kubenswrapper[4929]: E1122 07:42:33.263746 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe is running failed: container process not found" containerID="6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 07:42:33 crc kubenswrapper[4929]: E1122 07:42:33.264122 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe is running failed: container process not found" containerID="6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 07:42:33 crc kubenswrapper[4929]: E1122 07:42:33.264189 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound 
desc = container is not created or running: checking if PID of 6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-zv95t" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" Nov 22 07:42:33 crc kubenswrapper[4929]: I1122 07:42:33.848899 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" podUID="af6c6154-1378-401d-b568-cf37274933fb" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.147:5353: connect: connection refused" Nov 22 07:42:36 crc kubenswrapper[4929]: I1122 07:42:36.088421 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.150:9322/\": dial tcp 10.217.0.150:9322: connect: connection refused" Nov 22 07:42:37 crc kubenswrapper[4929]: E1122 07:42:37.784363 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 22 07:42:37 crc kubenswrapper[4929]: E1122 07:42:37.784665 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xjgpl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-9cvcv_openstack(7e5572bd-b4bf-4476-9247-06d7c892dcf1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:42:37 crc kubenswrapper[4929]: E1122 07:42:37.785879 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-9cvcv" 
podUID="7e5572bd-b4bf-4476-9247-06d7c892dcf1" Nov 22 07:42:37 crc kubenswrapper[4929]: I1122 07:42:37.948486 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" Nov 22 07:42:37 crc kubenswrapper[4929]: E1122 07:42:37.948925 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:42:38 crc kubenswrapper[4929]: E1122 07:42:38.011540 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-9cvcv" podUID="7e5572bd-b4bf-4476-9247-06d7c892dcf1" Nov 22 07:42:38 crc kubenswrapper[4929]: I1122 07:42:38.848744 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" podUID="af6c6154-1378-401d-b568-cf37274933fb" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.147:5353: connect: connection refused" Nov 22 07:42:41 crc kubenswrapper[4929]: I1122 07:42:41.087853 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.150:9322/\": dial tcp 10.217.0.150:9322: connect: connection refused" Nov 22 07:42:43 crc kubenswrapper[4929]: E1122 07:42:43.263250 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe is running failed: container process not found" containerID="6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 07:42:43 crc kubenswrapper[4929]: E1122 07:42:43.264048 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe is running failed: container process not found" containerID="6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 07:42:43 crc kubenswrapper[4929]: E1122 07:42:43.264387 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe is running failed: container process not found" containerID="6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 07:42:43 crc kubenswrapper[4929]: E1122 07:42:43.264439 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-zv95t" 
podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" Nov 22 07:42:43 crc kubenswrapper[4929]: I1122 07:42:43.848939 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" podUID="af6c6154-1378-401d-b568-cf37274933fb" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.147:5353: connect: connection refused" Nov 22 07:42:43 crc kubenswrapper[4929]: I1122 07:42:43.849072 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:42:46 crc kubenswrapper[4929]: I1122 07:42:46.088610 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.150:9322/\": dial tcp 10.217.0.150:9322: connect: connection refused" Nov 22 07:42:48 crc kubenswrapper[4929]: I1122 07:42:48.848763 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" podUID="af6c6154-1378-401d-b568-cf37274933fb" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.147:5353: connect: connection refused" Nov 22 07:42:48 crc kubenswrapper[4929]: I1122 07:42:48.947763 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" Nov 22 07:42:48 crc kubenswrapper[4929]: E1122 07:42:48.948013 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:42:51 crc kubenswrapper[4929]: I1122 07:42:51.090322 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.150:9322/\": dial tcp 10.217.0.150:9322: connect: connection refused" Nov 22 07:42:53 crc kubenswrapper[4929]: E1122 07:42:53.262428 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe is running failed: container process not found" containerID="6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 07:42:53 crc kubenswrapper[4929]: E1122 07:42:53.263487 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe is running failed: container process not found" containerID="6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 07:42:53 crc kubenswrapper[4929]: E1122 07:42:53.263769 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe is running failed: container process not found" 
containerID="6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 07:42:53 crc kubenswrapper[4929]: E1122 07:42:53.263862 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-zv95t" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" Nov 22 07:42:53 crc kubenswrapper[4929]: I1122 07:42:53.849184 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" podUID="af6c6154-1378-401d-b568-cf37274933fb" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.147:5353: connect: connection refused" Nov 22 07:42:56 crc kubenswrapper[4929]: I1122 07:42:56.090469 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.150:9322/\": dial tcp 10.217.0.150:9322: connect: connection refused" Nov 22 07:42:58 crc kubenswrapper[4929]: W1122 07:42:58.722256 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8f14a1c_f6bf_4a66_b839_393c7c34b932.slice/crio-352f38444f1380c098a8f140aa743c88ac4d18d071ad41cd779a5f21dfad6da4 WatchSource:0}: Error finding container 352f38444f1380c098a8f140aa743c88ac4d18d071ad41cd779a5f21dfad6da4: Status 404 returned error can't find the container with id 352f38444f1380c098a8f140aa743c88ac4d18d071ad41cd779a5f21dfad6da4 Nov 22 07:42:58 crc kubenswrapper[4929]: W1122 07:42:58.726385 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod89f5e117_d76e_446c_a83a_09737f044c1f.slice/crio-51fbd5d9a4b00913f47b4d25f22cc09ebafceadb61dda510125a727434f688f0 WatchSource:0}: Error finding container 51fbd5d9a4b00913f47b4d25f22cc09ebafceadb61dda510125a727434f688f0: Status 404 returned error can't find the container with id 51fbd5d9a4b00913f47b4d25f22cc09ebafceadb61dda510125a727434f688f0 Nov 22 07:42:58 crc kubenswrapper[4929]: I1122 07:42:58.849461 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" podUID="af6c6154-1378-401d-b568-cf37274933fb" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.147:5353: connect: connection refused" Nov 22 07:42:58 crc kubenswrapper[4929]: I1122 07:42:58.860344 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zv95t" Nov 22 07:42:58 crc kubenswrapper[4929]: I1122 07:42:58.908369 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2584eaad-5c38-40d2-b1da-7a6268080fd0-catalog-content\") pod \"2584eaad-5c38-40d2-b1da-7a6268080fd0\" (UID: \"2584eaad-5c38-40d2-b1da-7a6268080fd0\") " Nov 22 07:42:58 crc kubenswrapper[4929]: I1122 07:42:58.908698 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2584eaad-5c38-40d2-b1da-7a6268080fd0-utilities\") pod \"2584eaad-5c38-40d2-b1da-7a6268080fd0\" (UID: \"2584eaad-5c38-40d2-b1da-7a6268080fd0\") " Nov 22 07:42:58 crc kubenswrapper[4929]: I1122 07:42:58.908783 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tm2nh\" (UniqueName: \"kubernetes.io/projected/2584eaad-5c38-40d2-b1da-7a6268080fd0-kube-api-access-tm2nh\") pod \"2584eaad-5c38-40d2-b1da-7a6268080fd0\" (UID: \"2584eaad-5c38-40d2-b1da-7a6268080fd0\") " Nov 22 07:42:58 crc kubenswrapper[4929]: I1122 07:42:58.936025 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2584eaad-5c38-40d2-b1da-7a6268080fd0-kube-api-access-tm2nh" (OuterVolumeSpecName: "kube-api-access-tm2nh") pod "2584eaad-5c38-40d2-b1da-7a6268080fd0" (UID: "2584eaad-5c38-40d2-b1da-7a6268080fd0"). InnerVolumeSpecName "kube-api-access-tm2nh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:42:59 crc kubenswrapper[4929]: I1122 07:42:59.012557 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tm2nh\" (UniqueName: \"kubernetes.io/projected/2584eaad-5c38-40d2-b1da-7a6268080fd0-kube-api-access-tm2nh\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:59 crc kubenswrapper[4929]: I1122 07:42:59.106893 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2584eaad-5c38-40d2-b1da-7a6268080fd0-utilities" (OuterVolumeSpecName: "utilities") pod "2584eaad-5c38-40d2-b1da-7a6268080fd0" (UID: "2584eaad-5c38-40d2-b1da-7a6268080fd0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:42:59 crc kubenswrapper[4929]: I1122 07:42:59.114667 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2584eaad-5c38-40d2-b1da-7a6268080fd0-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:59 crc kubenswrapper[4929]: I1122 07:42:59.199823 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2584eaad-5c38-40d2-b1da-7a6268080fd0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2584eaad-5c38-40d2-b1da-7a6268080fd0" (UID: "2584eaad-5c38-40d2-b1da-7a6268080fd0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:42:59 crc kubenswrapper[4929]: I1122 07:42:59.211748 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-58894b567d-khmvq" event={"ID":"d8f14a1c-f6bf-4a66-b839-393c7c34b932","Type":"ContainerStarted","Data":"352f38444f1380c098a8f140aa743c88ac4d18d071ad41cd779a5f21dfad6da4"} Nov 22 07:42:59 crc kubenswrapper[4929]: I1122 07:42:59.216476 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zv95t" event={"ID":"2584eaad-5c38-40d2-b1da-7a6268080fd0","Type":"ContainerDied","Data":"fc35cb1a91f7a32493454e46b3231fa52877a1d60c65a1e2f71ecc1cfa53a385"} Nov 22 07:42:59 crc kubenswrapper[4929]: I1122 07:42:59.216605 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zv95t" Nov 22 07:42:59 crc kubenswrapper[4929]: I1122 07:42:59.218451 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2584eaad-5c38-40d2-b1da-7a6268080fd0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 07:42:59 crc kubenswrapper[4929]: I1122 07:42:59.218870 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-598887bdc4-mxhdk" event={"ID":"89f5e117-d76e-446c-a83a-09737f044c1f","Type":"ContainerStarted","Data":"51fbd5d9a4b00913f47b4d25f22cc09ebafceadb61dda510125a727434f688f0"} Nov 22 07:42:59 crc kubenswrapper[4929]: I1122 07:42:59.267055 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zv95t"] Nov 22 07:42:59 crc kubenswrapper[4929]: I1122 07:42:59.280295 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zv95t"] Nov 22 07:42:59 crc kubenswrapper[4929]: I1122 07:42:59.957413 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" path="/var/lib/kubelet/pods/2584eaad-5c38-40d2-b1da-7a6268080fd0/volumes" Nov 22 07:43:00 crc kubenswrapper[4929]: I1122 07:43:00.948069 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" Nov 22 07:43:00 crc kubenswrapper[4929]: E1122 07:43:00.948692 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:43:03 crc kubenswrapper[4929]: E1122 07:43:03.509797 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 22 07:43:03 crc kubenswrapper[4929]: E1122 07:43:03.510316 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n65ch9dh678h66fh66hf5h559h659h9dh647h98h65h675h9ch5ffh68bh586hffh5d6h9h569h58bh589h697h545hd6h79h74h599hffh5f9hd7q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:yes,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f4kzh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-6778fbb94f-xzkz8_openstack(e571c2d8-a9a3-4a03-b95f-35027b6d90fc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:43:03 crc kubenswrapper[4929]: E1122 07:43:03.513945 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-6778fbb94f-xzkz8" podUID="e571c2d8-a9a3-4a03-b95f-35027b6d90fc" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.107114 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.111666 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.162072 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3e41e21e-417a-4355-904b-8a2ff7ef19b5-logs\") pod \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.162162 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6mmpb\" (UniqueName: \"kubernetes.io/projected/3e41e21e-417a-4355-904b-8a2ff7ef19b5-kube-api-access-6mmpb\") pod \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.162322 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e41e21e-417a-4355-904b-8a2ff7ef19b5-combined-ca-bundle\") pod \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.162354 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3e41e21e-417a-4355-904b-8a2ff7ef19b5-custom-prometheus-ca\") pod \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.162378 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e41e21e-417a-4355-904b-8a2ff7ef19b5-config-data\") pod \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\" (UID: \"3e41e21e-417a-4355-904b-8a2ff7ef19b5\") " Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.174255 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3e41e21e-417a-4355-904b-8a2ff7ef19b5-logs" (OuterVolumeSpecName: "logs") pod "3e41e21e-417a-4355-904b-8a2ff7ef19b5" (UID: "3e41e21e-417a-4355-904b-8a2ff7ef19b5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.204286 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e41e21e-417a-4355-904b-8a2ff7ef19b5-kube-api-access-6mmpb" (OuterVolumeSpecName: "kube-api-access-6mmpb") pod "3e41e21e-417a-4355-904b-8a2ff7ef19b5" (UID: "3e41e21e-417a-4355-904b-8a2ff7ef19b5"). InnerVolumeSpecName "kube-api-access-6mmpb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.207125 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e41e21e-417a-4355-904b-8a2ff7ef19b5-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "3e41e21e-417a-4355-904b-8a2ff7ef19b5" (UID: "3e41e21e-417a-4355-904b-8a2ff7ef19b5"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.212583 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e41e21e-417a-4355-904b-8a2ff7ef19b5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3e41e21e-417a-4355-904b-8a2ff7ef19b5" (UID: "3e41e21e-417a-4355-904b-8a2ff7ef19b5"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.246615 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e41e21e-417a-4355-904b-8a2ff7ef19b5-config-data" (OuterVolumeSpecName: "config-data") pod "3e41e21e-417a-4355-904b-8a2ff7ef19b5" (UID: "3e41e21e-417a-4355-904b-8a2ff7ef19b5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.289274 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-dns-swift-storage-0\") pod \"af6c6154-1378-401d-b568-cf37274933fb\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.289389 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-dns-svc\") pod \"af6c6154-1378-401d-b568-cf37274933fb\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.289421 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-ovsdbserver-nb\") pod \"af6c6154-1378-401d-b568-cf37274933fb\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.289454 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jssz\" (UniqueName: \"kubernetes.io/projected/af6c6154-1378-401d-b568-cf37274933fb-kube-api-access-4jssz\") pod \"af6c6154-1378-401d-b568-cf37274933fb\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.289510 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-config\") pod \"af6c6154-1378-401d-b568-cf37274933fb\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.289600 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-ovsdbserver-sb\") pod \"af6c6154-1378-401d-b568-cf37274933fb\" (UID: \"af6c6154-1378-401d-b568-cf37274933fb\") " Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.290043 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3e41e21e-417a-4355-904b-8a2ff7ef19b5-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.290067 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6mmpb\" (UniqueName: \"kubernetes.io/projected/3e41e21e-417a-4355-904b-8a2ff7ef19b5-kube-api-access-6mmpb\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.290082 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e41e21e-417a-4355-904b-8a2ff7ef19b5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.290096 4929 reconciler_common.go:293] "Volume detached 
for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/3e41e21e-417a-4355-904b-8a2ff7ef19b5-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.290107 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e41e21e-417a-4355-904b-8a2ff7ef19b5-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.292034 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"3e41e21e-417a-4355-904b-8a2ff7ef19b5","Type":"ContainerDied","Data":"fd6c125d0dfcab48660df7220d09b94166c37581282d1bc0b374b819db63aa9d"} Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.292048 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.292473 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af6c6154-1378-401d-b568-cf37274933fb-kube-api-access-4jssz" (OuterVolumeSpecName: "kube-api-access-4jssz") pod "af6c6154-1378-401d-b568-cf37274933fb" (UID: "af6c6154-1378-401d-b568-cf37274933fb"). InnerVolumeSpecName "kube-api-access-4jssz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.296879 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" event={"ID":"af6c6154-1378-401d-b568-cf37274933fb","Type":"ContainerDied","Data":"8ef5eb293108ae985966fed0d0400d96793197618513e9b7d25253ca443096f2"} Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.297001 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.332091 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.338345 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "af6c6154-1378-401d-b568-cf37274933fb" (UID: "af6c6154-1378-401d-b568-cf37274933fb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.350743 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-config" (OuterVolumeSpecName: "config") pod "af6c6154-1378-401d-b568-cf37274933fb" (UID: "af6c6154-1378-401d-b568-cf37274933fb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.358470 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "af6c6154-1378-401d-b568-cf37274933fb" (UID: "af6c6154-1378-401d-b568-cf37274933fb"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.360317 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-api-0"] Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.370114 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Nov 22 07:43:05 crc kubenswrapper[4929]: E1122 07:43:05.370540 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="extract-content" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.370568 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="extract-content" Nov 22 07:43:05 crc kubenswrapper[4929]: E1122 07:43:05.370583 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" containerName="watcher-api-log" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.370590 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" containerName="watcher-api-log" Nov 22 07:43:05 crc kubenswrapper[4929]: E1122 07:43:05.370615 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af6c6154-1378-401d-b568-cf37274933fb" containerName="init" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.370621 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="af6c6154-1378-401d-b568-cf37274933fb" containerName="init" Nov 22 07:43:05 crc kubenswrapper[4929]: E1122 07:43:05.370636 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="extract-utilities" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.370643 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="extract-utilities" Nov 22 07:43:05 crc kubenswrapper[4929]: E1122 07:43:05.370707 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" containerName="watcher-api" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.370714 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" containerName="watcher-api" Nov 22 07:43:05 crc kubenswrapper[4929]: E1122 07:43:05.370725 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.370730 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" Nov 22 07:43:05 crc kubenswrapper[4929]: E1122 07:43:05.370742 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.370747 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" Nov 22 07:43:05 crc kubenswrapper[4929]: E1122 07:43:05.370762 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af6c6154-1378-401d-b568-cf37274933fb" containerName="dnsmasq-dns" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.370768 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="af6c6154-1378-401d-b568-cf37274933fb" containerName="dnsmasq-dns" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 
07:43:05.370955 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.370968 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="af6c6154-1378-401d-b568-cf37274933fb" containerName="dnsmasq-dns" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.370978 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="2584eaad-5c38-40d2-b1da-7a6268080fd0" containerName="registry-server" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.370993 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" containerName="watcher-api" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.371004 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" containerName="watcher-api-log" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.372083 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.377807 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.378524 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.380184 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "af6c6154-1378-401d-b568-cf37274933fb" (UID: "af6c6154-1378-401d-b568-cf37274933fb"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.384920 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "af6c6154-1378-401d-b568-cf37274933fb" (UID: "af6c6154-1378-401d-b568-cf37274933fb"). InnerVolumeSpecName "ovsdbserver-sb". 
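The "SyncLoop (PLEG): event for pod" lines above come from the kubelet's pod lifecycle event generator: it relists container state from the runtime and feeds each change into the sync loop, which is why the ContainerDied events for the old watcher-api-0 and dnsmasq pods are immediately followed by SyncLoop DELETE/REMOVE/ADD as the controllers replace each pod under the same name but a new UID. A minimal sketch of the event shape, mirroring only the fields printed in the log (this standalone type is an illustration, not the kubelet's internal pleg package):

```go
package main

import "fmt"

// PodLifecycleEvent approximates what the kubelet logs as
// event={"ID":...,"Type":"ContainerDied","Data":...}.
type PodLifecycleEvent struct {
	ID   string      // pod UID
	Type string      // e.g. "ContainerStarted", "ContainerDied"
	Data interface{} // for ContainerDied: the dead container/sandbox ID
}

func main() {
	ev := PodLifecycleEvent{
		ID:   "3e41e21e-417a-4355-904b-8a2ff7ef19b5",
		Type: "ContainerDied",
		Data: "fd6c125d0dfcab48660df7220d09b94166c37581282d1bc0b374b819db63aa9d",
	}
	fmt.Printf("SyncLoop (PLEG): event for pod: %+v\n", ev)
}
```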
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.391883 4929 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.391910 4929 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.391976 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.391985 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jssz\" (UniqueName: \"kubernetes.io/projected/af6c6154-1378-401d-b568-cf37274933fb-kube-api-access-4jssz\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.391994 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.392022 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/af6c6154-1378-401d-b568-cf37274933fb-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.493748 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/14af86c0-0c4b-46b7-b2c5-b69a35346d62-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " pod="openstack/watcher-api-0" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.493911 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ct94w\" (UniqueName: \"kubernetes.io/projected/14af86c0-0c4b-46b7-b2c5-b69a35346d62-kube-api-access-ct94w\") pod \"watcher-api-0\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " pod="openstack/watcher-api-0" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.493967 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/14af86c0-0c4b-46b7-b2c5-b69a35346d62-logs\") pod \"watcher-api-0\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " pod="openstack/watcher-api-0" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.493994 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14af86c0-0c4b-46b7-b2c5-b69a35346d62-config-data\") pod \"watcher-api-0\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " pod="openstack/watcher-api-0" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.494076 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14af86c0-0c4b-46b7-b2c5-b69a35346d62-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " pod="openstack/watcher-api-0" Nov 22 
07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.595634 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/14af86c0-0c4b-46b7-b2c5-b69a35346d62-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " pod="openstack/watcher-api-0" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.595716 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ct94w\" (UniqueName: \"kubernetes.io/projected/14af86c0-0c4b-46b7-b2c5-b69a35346d62-kube-api-access-ct94w\") pod \"watcher-api-0\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " pod="openstack/watcher-api-0" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.595773 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/14af86c0-0c4b-46b7-b2c5-b69a35346d62-logs\") pod \"watcher-api-0\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " pod="openstack/watcher-api-0" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.595799 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14af86c0-0c4b-46b7-b2c5-b69a35346d62-config-data\") pod \"watcher-api-0\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " pod="openstack/watcher-api-0" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.595827 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14af86c0-0c4b-46b7-b2c5-b69a35346d62-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " pod="openstack/watcher-api-0" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.597329 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/14af86c0-0c4b-46b7-b2c5-b69a35346d62-logs\") pod \"watcher-api-0\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " pod="openstack/watcher-api-0" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.603014 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14af86c0-0c4b-46b7-b2c5-b69a35346d62-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " pod="openstack/watcher-api-0" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.605927 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/14af86c0-0c4b-46b7-b2c5-b69a35346d62-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " pod="openstack/watcher-api-0" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.611951 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14af86c0-0c4b-46b7-b2c5-b69a35346d62-config-data\") pod \"watcher-api-0\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " pod="openstack/watcher-api-0" Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.621881 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ct94w\" (UniqueName: \"kubernetes.io/projected/14af86c0-0c4b-46b7-b2c5-b69a35346d62-kube-api-access-ct94w\") pod \"watcher-api-0\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " pod="openstack/watcher-api-0" Nov 22 
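The VerifyControllerAttachedVolume, MountVolume started, and MountVolume.SetUp succeeded sequence above is the volume manager bringing up the replacement watcher-api-0 pod (UID 14af86c0-...). The pod's volumes can be read straight off the UniqueName fields; here is a reconstruction in k8s.io/api types as a sketch only: the operator's real manifest is not in this log, and the Secret names behind combined-ca-bundle and custom-prometheus-ca are assumptions (watcher-api-config-data does appear verbatim in the reflector cache line above):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Volumes implied by the MountVolume.SetUp events for watcher-api-0.
	vols := []corev1.Volume{
		{Name: "config-data", VolumeSource: corev1.VolumeSource{
			Secret: &corev1.SecretVolumeSource{SecretName: "watcher-api-config-data"}}},
		{Name: "combined-ca-bundle", VolumeSource: corev1.VolumeSource{
			Secret: &corev1.SecretVolumeSource{SecretName: "combined-ca-bundle"}}}, // secret name assumed
		{Name: "custom-prometheus-ca", VolumeSource: corev1.VolumeSource{
			Secret: &corev1.SecretVolumeSource{SecretName: "custom-prometheus-ca"}}}, // secret name assumed
		{Name: "logs", VolumeSource: corev1.VolumeSource{
			EmptyDir: &corev1.EmptyDirVolumeSource{}}},
	}
	for _, v := range vols {
		fmt.Printf("volume %q -> %+v\n", v.Name, v.VolumeSource)
	}
}
```

kube-api-access-ct94w is the projected service-account token volume that the control plane injects automatically, which is why it shows up in the mount events but would not appear in a hand-written pod spec.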
Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.649468 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-n4sjl"]
Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.656296 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-n4sjl"]
Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.693905 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0"
Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.961863 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" path="/var/lib/kubelet/pods/3e41e21e-417a-4355-904b-8a2ff7ef19b5/volumes"
Nov 22 07:43:05 crc kubenswrapper[4929]: I1122 07:43:05.963700 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af6c6154-1378-401d-b568-cf37274933fb" path="/var/lib/kubelet/pods/af6c6154-1378-401d-b568-cf37274933fb/volumes"
Nov 22 07:43:06 crc kubenswrapper[4929]: I1122 07:43:06.088810 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="3e41e21e-417a-4355-904b-8a2ff7ef19b5" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.150:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 22 07:43:08 crc kubenswrapper[4929]: I1122 07:43:08.848712 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-764c5664d7-n4sjl" podUID="af6c6154-1378-401d-b568-cf37274933fb" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.147:5353: i/o timeout"
Nov 22 07:43:09 crc kubenswrapper[4929]: E1122 07:43:09.313344 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified"
Nov 22 07:43:09 crc kubenswrapper[4929]: E1122 07:43:09.313567 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5fbh66ch7fh64fh55dh9ch645h5f5h8dhch565h5f7h584hf4h96hf7hb4h99h8fhfch647h554h567hfch64dh669h9ch8fh57dh5ffh655h5c4q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:yes,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nhwjb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-69b66b77c7-hh8cg_openstack(c276fbf8-d0a2-457b-a718-a0bc7347f427): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:43:09 crc kubenswrapper[4929]: E1122 07:43:09.315580 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-69b66b77c7-hh8cg" podUID="c276fbf8-d0a2-457b-a718-a0bc7347f427" Nov 22 07:43:10 crc kubenswrapper[4929]: E1122 07:43:10.037321 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 22 07:43:10 crc kubenswrapper[4929]: E1122 07:43:10.037902 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5cdhb4h599h55ch69hb6h698hfdh5cch5cbh96hcbh8dh7bh599h76h65dh565h548h5f8h569h64h64bh686h9h67ch584h99h5bdh64bh584h5bcq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:yes,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zbf96,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-8449665697-bfcjw_openstack(520c3db2-3804-4bb6-a2b8-b5e51389f66d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:43:10 crc kubenswrapper[4929]: E1122 07:43:10.040871 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-8449665697-bfcjw" podUID="520c3db2-3804-4bb6-a2b8-b5e51389f66d" Nov 22 07:43:10 crc kubenswrapper[4929]: I1122 07:43:10.879836 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-69b66b77c7-hh8cg" Nov 22 07:43:10 crc kubenswrapper[4929]: I1122 07:43:10.885842 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-8449665697-bfcjw"
Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.012589 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/520c3db2-3804-4bb6-a2b8-b5e51389f66d-scripts\") pod \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") "
Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.012702 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/520c3db2-3804-4bb6-a2b8-b5e51389f66d-config-data\") pod \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") "
Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.012744 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c276fbf8-d0a2-457b-a718-a0bc7347f427-config-data\") pod \"c276fbf8-d0a2-457b-a718-a0bc7347f427\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") "
Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.012824 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhwjb\" (UniqueName: \"kubernetes.io/projected/c276fbf8-d0a2-457b-a718-a0bc7347f427-kube-api-access-nhwjb\") pod \"c276fbf8-d0a2-457b-a718-a0bc7347f427\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") "
Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.012855 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c276fbf8-d0a2-457b-a718-a0bc7347f427-scripts\") pod \"c276fbf8-d0a2-457b-a718-a0bc7347f427\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") "
Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.012888 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c276fbf8-d0a2-457b-a718-a0bc7347f427-logs\") pod \"c276fbf8-d0a2-457b-a718-a0bc7347f427\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") "
Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.012913 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/520c3db2-3804-4bb6-a2b8-b5e51389f66d-horizon-secret-key\") pod \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") "
Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.012931 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c276fbf8-d0a2-457b-a718-a0bc7347f427-horizon-secret-key\") pod \"c276fbf8-d0a2-457b-a718-a0bc7347f427\" (UID: \"c276fbf8-d0a2-457b-a718-a0bc7347f427\") "
Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.012951 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/520c3db2-3804-4bb6-a2b8-b5e51389f66d-logs\") pod \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") "
Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.013030 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zbf96\" (UniqueName: \"kubernetes.io/projected/520c3db2-3804-4bb6-a2b8-b5e51389f66d-kube-api-access-zbf96\") pod \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\" (UID: \"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") "
\"520c3db2-3804-4bb6-a2b8-b5e51389f66d\") " Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.013106 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/520c3db2-3804-4bb6-a2b8-b5e51389f66d-scripts" (OuterVolumeSpecName: "scripts") pod "520c3db2-3804-4bb6-a2b8-b5e51389f66d" (UID: "520c3db2-3804-4bb6-a2b8-b5e51389f66d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.013285 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c276fbf8-d0a2-457b-a718-a0bc7347f427-logs" (OuterVolumeSpecName: "logs") pod "c276fbf8-d0a2-457b-a718-a0bc7347f427" (UID: "c276fbf8-d0a2-457b-a718-a0bc7347f427"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.013433 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c276fbf8-d0a2-457b-a718-a0bc7347f427-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.013449 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/520c3db2-3804-4bb6-a2b8-b5e51389f66d-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.013887 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c276fbf8-d0a2-457b-a718-a0bc7347f427-config-data" (OuterVolumeSpecName: "config-data") pod "c276fbf8-d0a2-457b-a718-a0bc7347f427" (UID: "c276fbf8-d0a2-457b-a718-a0bc7347f427"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.013932 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/520c3db2-3804-4bb6-a2b8-b5e51389f66d-logs" (OuterVolumeSpecName: "logs") pod "520c3db2-3804-4bb6-a2b8-b5e51389f66d" (UID: "520c3db2-3804-4bb6-a2b8-b5e51389f66d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.014182 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c276fbf8-d0a2-457b-a718-a0bc7347f427-scripts" (OuterVolumeSpecName: "scripts") pod "c276fbf8-d0a2-457b-a718-a0bc7347f427" (UID: "c276fbf8-d0a2-457b-a718-a0bc7347f427"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.015434 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/520c3db2-3804-4bb6-a2b8-b5e51389f66d-config-data" (OuterVolumeSpecName: "config-data") pod "520c3db2-3804-4bb6-a2b8-b5e51389f66d" (UID: "520c3db2-3804-4bb6-a2b8-b5e51389f66d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.019066 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/520c3db2-3804-4bb6-a2b8-b5e51389f66d-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "520c3db2-3804-4bb6-a2b8-b5e51389f66d" (UID: "520c3db2-3804-4bb6-a2b8-b5e51389f66d"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.020005 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c276fbf8-d0a2-457b-a718-a0bc7347f427-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "c276fbf8-d0a2-457b-a718-a0bc7347f427" (UID: "c276fbf8-d0a2-457b-a718-a0bc7347f427"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.020196 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c276fbf8-d0a2-457b-a718-a0bc7347f427-kube-api-access-nhwjb" (OuterVolumeSpecName: "kube-api-access-nhwjb") pod "c276fbf8-d0a2-457b-a718-a0bc7347f427" (UID: "c276fbf8-d0a2-457b-a718-a0bc7347f427"). InnerVolumeSpecName "kube-api-access-nhwjb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.021576 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/520c3db2-3804-4bb6-a2b8-b5e51389f66d-kube-api-access-zbf96" (OuterVolumeSpecName: "kube-api-access-zbf96") pod "520c3db2-3804-4bb6-a2b8-b5e51389f66d" (UID: "520c3db2-3804-4bb6-a2b8-b5e51389f66d"). InnerVolumeSpecName "kube-api-access-zbf96". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.114747 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c276fbf8-d0a2-457b-a718-a0bc7347f427-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.115167 4929 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/520c3db2-3804-4bb6-a2b8-b5e51389f66d-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.115178 4929 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/c276fbf8-d0a2-457b-a718-a0bc7347f427-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.115187 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/520c3db2-3804-4bb6-a2b8-b5e51389f66d-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.115195 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zbf96\" (UniqueName: \"kubernetes.io/projected/520c3db2-3804-4bb6-a2b8-b5e51389f66d-kube-api-access-zbf96\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.115225 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/520c3db2-3804-4bb6-a2b8-b5e51389f66d-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.115238 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c276fbf8-d0a2-457b-a718-a0bc7347f427-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.115249 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhwjb\" (UniqueName: \"kubernetes.io/projected/c276fbf8-d0a2-457b-a718-a0bc7347f427-kube-api-access-nhwjb\") on node \"crc\" 
DevicePath \"\"" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.352339 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8449665697-bfcjw" event={"ID":"520c3db2-3804-4bb6-a2b8-b5e51389f66d","Type":"ContainerDied","Data":"cb757a06062ec7762bc7a1f93309802298f8218c37cde1eb242a9a9c8d7d6774"} Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.352454 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-8449665697-bfcjw" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.354301 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-69b66b77c7-hh8cg" event={"ID":"c276fbf8-d0a2-457b-a718-a0bc7347f427","Type":"ContainerDied","Data":"3f6c9416ff0cb2e9103551fe7d5cd1aee10120f472ba86ccbbf30969604b1738"} Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.354395 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-69b66b77c7-hh8cg" Nov 22 07:43:11 crc kubenswrapper[4929]: E1122 07:43:11.407962 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.196:5001/podified-epoxy-centos9/openstack-watcher-applier:watcher_latest" Nov 22 07:43:11 crc kubenswrapper[4929]: E1122 07:43:11.408013 4929 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.196:5001/podified-epoxy-centos9/openstack-watcher-applier:watcher_latest" Nov 22 07:43:11 crc kubenswrapper[4929]: E1122 07:43:11.408153 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:watcher-applier,Image:38.102.83.196:5001/podified-epoxy-centos9/openstack-watcher-applier:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n576hcdhd8h9dh54bh545h5bdh5cbh666h7hb4h89h698h89h5dchf7h9ch568h599hfh665h7dh59h657h554h54h65h578h85h5bch5cfh696q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:watcher-applier-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/watcher,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-j6zvj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pgrep -r DRST 
watcher-applier],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:10,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pgrep -r DRST watcher-applier],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42451,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pgrep -r DRST watcher-applier],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:6,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-applier-0_openstack(524a1cf1-3627-4e2e-ae71-7648ba4462fa): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:43:11 crc kubenswrapper[4929]: E1122 07:43:11.409559 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/watcher-applier-0" podUID="524a1cf1-3627-4e2e-ae71-7648ba4462fa" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.426464 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-8449665697-bfcjw"] Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.437136 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-8449665697-bfcjw"] Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.459732 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-69b66b77c7-hh8cg"] Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.478830 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-69b66b77c7-hh8cg"] Nov 22 07:43:11 crc kubenswrapper[4929]: E1122 07:43:11.708330 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.196:5001/podified-epoxy-centos9/openstack-watcher-decision-engine:watcher_latest" Nov 22 07:43:11 crc kubenswrapper[4929]: E1122 07:43:11.708397 4929 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.102.83.196:5001/podified-epoxy-centos9/openstack-watcher-decision-engine:watcher_latest" Nov 22 07:43:11 crc kubenswrapper[4929]: E1122 07:43:11.708552 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:watcher-decision-engine,Image:38.102.83.196:5001/podified-epoxy-centos9/openstack-watcher-decision-engine:watcher_latest,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5cbh67fh574h686hb5h694h58fh557h76h549hch68bh5d4h688h66h96hdh5bbhcfh678h5d9h556h5d8h685h89h55fhb6h5fdh64dh664h5cch78q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:watcher-decision-engine-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/watcher,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:custom-prometheus-ca,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/prometheus/ca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-578qq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pgrep -f -r DRST watcher-decision-engine],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pgrep -f -r DRST watcher-decision-engine],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42451,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pgrep -f -r DRST watcher-decision-engine],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:6,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-decision-engine-0_openstack(57774149-a18c-475d-ad44-23687ced2981): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:43:11 crc kubenswrapper[4929]: E1122 07:43:11.709830 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack/watcher-decision-engine-0" podUID="57774149-a18c-475d-ad44-23687ced2981" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.959025 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="520c3db2-3804-4bb6-a2b8-b5e51389f66d" path="/var/lib/kubelet/pods/520c3db2-3804-4bb6-a2b8-b5e51389f66d/volumes" Nov 22 07:43:11 crc kubenswrapper[4929]: I1122 07:43:11.959755 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c276fbf8-d0a2-457b-a718-a0bc7347f427" path="/var/lib/kubelet/pods/c276fbf8-d0a2-457b-a718-a0bc7347f427/volumes" Nov 22 07:43:12 crc kubenswrapper[4929]: E1122 07:43:12.366428 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-applier\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.196:5001/podified-epoxy-centos9/openstack-watcher-applier:watcher_latest\\\"\"" pod="openstack/watcher-applier-0" podUID="524a1cf1-3627-4e2e-ae71-7648ba4462fa" Nov 22 07:43:12 crc kubenswrapper[4929]: E1122 07:43:12.366351 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-decision-engine\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.196:5001/podified-epoxy-centos9/openstack-watcher-decision-engine:watcher_latest\\\"\"" pod="openstack/watcher-decision-engine-0" podUID="57774149-a18c-475d-ad44-23687ced2981" Nov 22 07:43:14 crc kubenswrapper[4929]: I1122 07:43:14.554710 4929 scope.go:117] "RemoveContainer" containerID="9e1522b539920298a3c257a33e4a738be5a9df227ec123c00b421f1f1d1542a0" Nov 22 07:43:15 crc kubenswrapper[4929]: I1122 07:43:15.948113 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" Nov 22 07:43:15 crc kubenswrapper[4929]: E1122 07:43:15.948916 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:43:16 crc kubenswrapper[4929]: E1122 07:43:16.565502 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 22 07:43:16 crc kubenswrapper[4929]: E1122 07:43:16.568714 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = reading blob sha256:270b5c4b38bb0dcdf5ab595b4aba98ad6f0957ba5c54835d04578d2348cf63bb: Get \"https://quay.io/v2/podified-antelope-centos9/openstack-cinder-api/blobs/sha256:270b5c4b38bb0dcdf5ab595b4aba98ad6f0957ba5c54835d04578d2348cf63bb\": context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 22 07:43:16 crc kubenswrapper[4929]: E1122 07:43:16.568902 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dc4mg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-r6lzk_openstack(4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c): ErrImagePull: rpc error: code = Canceled desc = reading blob sha256:270b5c4b38bb0dcdf5ab595b4aba98ad6f0957ba5c54835d04578d2348cf63bb: Get \"https://quay.io/v2/podified-antelope-centos9/openstack-cinder-api/blobs/sha256:270b5c4b38bb0dcdf5ab595b4aba98ad6f0957ba5c54835d04578d2348cf63bb\": context canceled" logger="UnhandledError" Nov 22 07:43:16 crc kubenswrapper[4929]: E1122 07:43:16.570528 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Nov 22 07:43:16 crc kubenswrapper[4929]: E1122 07:43:16.570526 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = reading blob sha256:270b5c4b38bb0dcdf5ab595b4aba98ad6f0957ba5c54835d04578d2348cf63bb: Get \\\"https://quay.io/v2/podified-antelope-centos9/openstack-cinder-api/blobs/sha256:270b5c4b38bb0dcdf5ab595b4aba98ad6f0957ba5c54835d04578d2348cf63bb\\\": context canceled\"" pod="openstack/cinder-db-sync-r6lzk" podUID="4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c" Nov 22 07:43:16 crc kubenswrapper[4929]: E1122 07:43:16.570635 4929 
kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pnqhm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-rsm44_openstack(155c1bdd-4b26-4059-8ab7-7a6299bc17c9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:43:16 crc kubenswrapper[4929]: E1122 07:43:16.571848 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-rsm44" podUID="155c1bdd-4b26-4059-8ab7-7a6299bc17c9" Nov 22 07:43:16 crc kubenswrapper[4929]: E1122 07:43:16.573441 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db 
upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xjgpl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-9cvcv_openstack(7e5572bd-b4bf-4476-9247-06d7c892dcf1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:43:16 crc kubenswrapper[4929]: E1122 07:43:16.574639 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-9cvcv" podUID="7e5572bd-b4bf-4476-9247-06d7c892dcf1" Nov 22 07:43:16 crc kubenswrapper[4929]: I1122 07:43:16.685013 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6778fbb94f-xzkz8"
Nov 22 07:43:16 crc kubenswrapper[4929]: I1122 07:43:16.832198 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-horizon-secret-key\") pod \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") "
Nov 22 07:43:16 crc kubenswrapper[4929]: I1122 07:43:16.832379 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-logs\") pod \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") "
Nov 22 07:43:16 crc kubenswrapper[4929]: I1122 07:43:16.832452 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-scripts\") pod \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") "
Nov 22 07:43:16 crc kubenswrapper[4929]: I1122 07:43:16.832490 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-config-data\") pod \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") "
Nov 22 07:43:16 crc kubenswrapper[4929]: I1122 07:43:16.832599 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4kzh\" (UniqueName: \"kubernetes.io/projected/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-kube-api-access-f4kzh\") pod \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\" (UID: \"e571c2d8-a9a3-4a03-b95f-35027b6d90fc\") "
Nov 22 07:43:16 crc kubenswrapper[4929]: I1122 07:43:16.832903 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-logs" (OuterVolumeSpecName: "logs") pod "e571c2d8-a9a3-4a03-b95f-35027b6d90fc" (UID: "e571c2d8-a9a3-4a03-b95f-35027b6d90fc"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:43:16 crc kubenswrapper[4929]: I1122 07:43:16.833136 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-scripts" (OuterVolumeSpecName: "scripts") pod "e571c2d8-a9a3-4a03-b95f-35027b6d90fc" (UID: "e571c2d8-a9a3-4a03-b95f-35027b6d90fc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:43:16 crc kubenswrapper[4929]: I1122 07:43:16.833345 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-config-data" (OuterVolumeSpecName: "config-data") pod "e571c2d8-a9a3-4a03-b95f-35027b6d90fc" (UID: "e571c2d8-a9a3-4a03-b95f-35027b6d90fc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:43:16 crc kubenswrapper[4929]: I1122 07:43:16.833441 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:16 crc kubenswrapper[4929]: I1122 07:43:16.833460 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:16 crc kubenswrapper[4929]: I1122 07:43:16.839281 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-kube-api-access-f4kzh" (OuterVolumeSpecName: "kube-api-access-f4kzh") pod "e571c2d8-a9a3-4a03-b95f-35027b6d90fc" (UID: "e571c2d8-a9a3-4a03-b95f-35027b6d90fc"). InnerVolumeSpecName "kube-api-access-f4kzh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:43:16 crc kubenswrapper[4929]: I1122 07:43:16.839476 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "e571c2d8-a9a3-4a03-b95f-35027b6d90fc" (UID: "e571c2d8-a9a3-4a03-b95f-35027b6d90fc"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:43:16 crc kubenswrapper[4929]: I1122 07:43:16.935204 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:16 crc kubenswrapper[4929]: I1122 07:43:16.935263 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4kzh\" (UniqueName: \"kubernetes.io/projected/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-kube-api-access-f4kzh\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:16 crc kubenswrapper[4929]: I1122 07:43:16.935280 4929 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e571c2d8-a9a3-4a03-b95f-35027b6d90fc-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:17 crc kubenswrapper[4929]: I1122 07:43:17.436344 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6778fbb94f-xzkz8" event={"ID":"e571c2d8-a9a3-4a03-b95f-35027b6d90fc","Type":"ContainerDied","Data":"a652622b7045f2f020e49efdf8919bd65c0b606a131a5a634550b9fe0eef6b88"} Nov 22 07:43:17 crc kubenswrapper[4929]: I1122 07:43:17.436430 4929 util.go:48] "No ready sandbox for pod can be found. 
Nov 22 07:43:17 crc kubenswrapper[4929]: E1122 07:43:17.438306 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-rsm44" podUID="155c1bdd-4b26-4059-8ab7-7a6299bc17c9"
Nov 22 07:43:17 crc kubenswrapper[4929]: E1122 07:43:17.445517 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-r6lzk" podUID="4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c"
Nov 22 07:43:17 crc kubenswrapper[4929]: I1122 07:43:17.522660 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6778fbb94f-xzkz8"]
Nov 22 07:43:17 crc kubenswrapper[4929]: I1122 07:43:17.530787 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6778fbb94f-xzkz8"]
Nov 22 07:43:17 crc kubenswrapper[4929]: I1122 07:43:17.957631 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e571c2d8-a9a3-4a03-b95f-35027b6d90fc" path="/var/lib/kubelet/pods/e571c2d8-a9a3-4a03-b95f-35027b6d90fc/volumes"
Nov 22 07:43:18 crc kubenswrapper[4929]: I1122 07:43:18.451578 4929 generic.go:334] "Generic (PLEG): container finished" podID="18461154-ba2d-496b-a4a8-0f14f91b64af" containerID="e9733877fa4257bd13e7a09e1ea4585937dcd795e20c87d558346307cd476837" exitCode=0
Nov 22 07:43:18 crc kubenswrapper[4929]: I1122 07:43:18.451634 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8gmxj" event={"ID":"18461154-ba2d-496b-a4a8-0f14f91b64af","Type":"ContainerDied","Data":"e9733877fa4257bd13e7a09e1ea4585937dcd795e20c87d558346307cd476837"}
Nov 22 07:43:18 crc kubenswrapper[4929]: I1122 07:43:18.615290 4929 scope.go:117] "RemoveContainer" containerID="640630986a26365009acc339264be8508a357b488804af0f1d66de9207159467"
Nov 22 07:43:18 crc kubenswrapper[4929]: I1122 07:43:18.633266 4929 scope.go:117] "RemoveContainer" containerID="6bc0af3e4d7fb35651a7df75cfac010416aa3ec3bad1a511ee574e903069e7fe"
Nov 22 07:43:19 crc kubenswrapper[4929]: E1122 07:43:19.073137 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified"
Nov 22 07:43:19 crc kubenswrapper[4929]: E1122 07:43:19.073622 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nfch5cbh64fh55dh9fh68h54h64fh68dh64fhb7h589h64ch5b7h66dh589h646hdbh588h5dbh696hb8hbfhbdhdh5dhfch67ch97hf7h555h657q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4wj44,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(59621e9f-5111-473e-b99f-02b09934b5ad): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.092635 4929 scope.go:117] "RemoveContainer" containerID="00257251583bf2c203ce56a26642666f911b29b65b17f6b2882b0bdcf6eb88f9" Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.116157 4929 scope.go:117] "RemoveContainer" containerID="8433b6e1feb3faea1eee8a9fc8a5b45358f3fb339c0bbfa06155fbe031127099" Nov 22 07:43:19 crc kubenswrapper[4929]: E1122 07:43:19.117919 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8433b6e1feb3faea1eee8a9fc8a5b45358f3fb339c0bbfa06155fbe031127099\": container with ID starting with 8433b6e1feb3faea1eee8a9fc8a5b45358f3fb339c0bbfa06155fbe031127099 not found: ID does not exist" containerID="8433b6e1feb3faea1eee8a9fc8a5b45358f3fb339c0bbfa06155fbe031127099" Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.117985 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8433b6e1feb3faea1eee8a9fc8a5b45358f3fb339c0bbfa06155fbe031127099"} err="failed to get container status \"8433b6e1feb3faea1eee8a9fc8a5b45358f3fb339c0bbfa06155fbe031127099\": rpc error: code = NotFound 
desc = could not find container \"8433b6e1feb3faea1eee8a9fc8a5b45358f3fb339c0bbfa06155fbe031127099\": container with ID starting with 8433b6e1feb3faea1eee8a9fc8a5b45358f3fb339c0bbfa06155fbe031127099 not found: ID does not exist"
Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.118014 4929 scope.go:117] "RemoveContainer" containerID="00257251583bf2c203ce56a26642666f911b29b65b17f6b2882b0bdcf6eb88f9"
Nov 22 07:43:19 crc kubenswrapper[4929]: E1122 07:43:19.201005 4929 log.go:32] "RemoveContainer from runtime service failed" err="rpc error: code = Unknown desc = failed to delete container k8s_extract-content_redhat-operators-zv95t_openshift-marketplace_2584eaad-5c38-40d2-b1da-7a6268080fd0_0 in pod sandbox fc35cb1a91f7a32493454e46b3231fa52877a1d60c65a1e2f71ecc1cfa53a385 from index: no such id: '00257251583bf2c203ce56a26642666f911b29b65b17f6b2882b0bdcf6eb88f9'" containerID="00257251583bf2c203ce56a26642666f911b29b65b17f6b2882b0bdcf6eb88f9"
Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.201097 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00257251583bf2c203ce56a26642666f911b29b65b17f6b2882b0bdcf6eb88f9"} err="rpc error: code = Unknown desc = failed to delete container k8s_extract-content_redhat-operators-zv95t_openshift-marketplace_2584eaad-5c38-40d2-b1da-7a6268080fd0_0 in pod sandbox fc35cb1a91f7a32493454e46b3231fa52877a1d60c65a1e2f71ecc1cfa53a385 from index: no such id: '00257251583bf2c203ce56a26642666f911b29b65b17f6b2882b0bdcf6eb88f9'"
Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.201153 4929 scope.go:117] "RemoveContainer" containerID="640630986a26365009acc339264be8508a357b488804af0f1d66de9207159467"
Nov 22 07:43:19 crc kubenswrapper[4929]: E1122 07:43:19.203968 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"640630986a26365009acc339264be8508a357b488804af0f1d66de9207159467\": container with ID starting with 640630986a26365009acc339264be8508a357b488804af0f1d66de9207159467 not found: ID does not exist" containerID="640630986a26365009acc339264be8508a357b488804af0f1d66de9207159467"
Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.204023 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"640630986a26365009acc339264be8508a357b488804af0f1d66de9207159467"} err="failed to get container status \"640630986a26365009acc339264be8508a357b488804af0f1d66de9207159467\": rpc error: code = NotFound desc = could not find container \"640630986a26365009acc339264be8508a357b488804af0f1d66de9207159467\": container with ID starting with 640630986a26365009acc339264be8508a357b488804af0f1d66de9207159467 not found: ID does not exist"
Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.204059 4929 scope.go:117] "RemoveContainer" containerID="9de14adf6155c62e3fc9638bd060dabb48765c4ecea551f6021ecbc1bec06396"
Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.292072 4929 scope.go:117] "RemoveContainer" containerID="e873dfe0e69e5f5e35ead7b8396e99d16dcd9d8b859bf1a0f29777a3e66257e2"
Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.347874 4929 scope.go:117] "RemoveContainer" containerID="4b666bf0a3163d13f9cc34e907110d44f4185d201360a35ceb73de743f89276b"
Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.364286 4929 scope.go:117] "RemoveContainer" containerID="cab09343d115a2448c8c76416b2d6c3072b1883ba540c203b2baee517e5cc447"
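The sequence above shows idempotent cleanup at work: "RemoveContainer" is attempted, the runtime answers with rpc code = NotFound (or "no such id"), and the kubelet logs the error but carries on, since a container that is already gone needs no further deletion. Below is a small Go sketch of that delete-if-present pattern against a gRPC-style error; removeIfPresent and the stubbed remove function are illustrative stand-ins, not the kubelet's actual code.

```go
// remove_if_present.go — idempotent-delete pattern: NotFound means "already gone".
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func removeIfPresent(remove func(id string) error, id string) error {
	err := remove(id)
	if err == nil {
		return nil
	}
	// A NotFound status means another path already deleted the container;
	// for cleanup purposes that counts as success, not failure.
	if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
		return nil
	}
	return fmt.Errorf("removing container %s: %w", id, err)
}

func main() {
	gone := func(id string) error {
		return status.Error(codes.NotFound, "ID does not exist")
	}
	fmt.Println(removeIfPresent(gone, "640630986a26")) // <nil>: treated as done
}
```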
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 22 07:43:19 crc kubenswrapper[4929]: W1122 07:43:19.530494 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod14af86c0_0c4b_46b7_b2c5_b69a35346d62.slice/crio-602a02fb0d936a4b07e3cef8faa136d035e90f1dc4c2e2cfccb3d49a3ed8a010 WatchSource:0}: Error finding container 602a02fb0d936a4b07e3cef8faa136d035e90f1dc4c2e2cfccb3d49a3ed8a010: Status 404 returned error can't find the container with id 602a02fb0d936a4b07e3cef8faa136d035e90f1dc4c2e2cfccb3d49a3ed8a010 Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.792766 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8gmxj" Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.900176 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8vq8\" (UniqueName: \"kubernetes.io/projected/18461154-ba2d-496b-a4a8-0f14f91b64af-kube-api-access-z8vq8\") pod \"18461154-ba2d-496b-a4a8-0f14f91b64af\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.900239 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-combined-ca-bundle\") pod \"18461154-ba2d-496b-a4a8-0f14f91b64af\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.900321 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-credential-keys\") pod \"18461154-ba2d-496b-a4a8-0f14f91b64af\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.900404 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-fernet-keys\") pod \"18461154-ba2d-496b-a4a8-0f14f91b64af\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.900432 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-config-data\") pod \"18461154-ba2d-496b-a4a8-0f14f91b64af\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.900483 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-scripts\") pod \"18461154-ba2d-496b-a4a8-0f14f91b64af\" (UID: \"18461154-ba2d-496b-a4a8-0f14f91b64af\") " Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.905080 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-scripts" (OuterVolumeSpecName: "scripts") pod "18461154-ba2d-496b-a4a8-0f14f91b64af" (UID: "18461154-ba2d-496b-a4a8-0f14f91b64af"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.905516 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18461154-ba2d-496b-a4a8-0f14f91b64af-kube-api-access-z8vq8" (OuterVolumeSpecName: "kube-api-access-z8vq8") pod "18461154-ba2d-496b-a4a8-0f14f91b64af" (UID: "18461154-ba2d-496b-a4a8-0f14f91b64af"). InnerVolumeSpecName "kube-api-access-z8vq8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.905661 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "18461154-ba2d-496b-a4a8-0f14f91b64af" (UID: "18461154-ba2d-496b-a4a8-0f14f91b64af"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.905686 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "18461154-ba2d-496b-a4a8-0f14f91b64af" (UID: "18461154-ba2d-496b-a4a8-0f14f91b64af"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.926335 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-config-data" (OuterVolumeSpecName: "config-data") pod "18461154-ba2d-496b-a4a8-0f14f91b64af" (UID: "18461154-ba2d-496b-a4a8-0f14f91b64af"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:43:19 crc kubenswrapper[4929]: I1122 07:43:19.930846 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "18461154-ba2d-496b-a4a8-0f14f91b64af" (UID: "18461154-ba2d-496b-a4a8-0f14f91b64af"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.001982 4929 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.002119 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.002178 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.002272 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z8vq8\" (UniqueName: \"kubernetes.io/projected/18461154-ba2d-496b-a4a8-0f14f91b64af-kube-api-access-z8vq8\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.002331 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.002391 4929 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/18461154-ba2d-496b-a4a8-0f14f91b64af-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.474324 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-598887bdc4-mxhdk" event={"ID":"89f5e117-d76e-446c-a83a-09737f044c1f","Type":"ContainerStarted","Data":"1e5846a8a7fbe857567a3cc6533613194b998fe91636f70477ee443a5d74fea8"} Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.476308 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8gmxj" event={"ID":"18461154-ba2d-496b-a4a8-0f14f91b64af","Type":"ContainerDied","Data":"3702040d46290d6889454808091d728c6371c498adee889aecc38ec619ce4f2c"} Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.476358 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3702040d46290d6889454808091d728c6371c498adee889aecc38ec619ce4f2c" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.476322 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-8gmxj" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.477591 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-58894b567d-khmvq" event={"ID":"d8f14a1c-f6bf-4a66-b839-393c7c34b932","Type":"ContainerStarted","Data":"f007b9ca81b72a7bf429238c5636d5c1ef9640f23d3d260450802bc216279dc1"} Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.479168 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"14af86c0-0c4b-46b7-b2c5-b69a35346d62","Type":"ContainerStarted","Data":"563836a5ede3a77d33f2d6d3b4b7365330d18888cf72c5732a3ead24c015fa98"} Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.479231 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"14af86c0-0c4b-46b7-b2c5-b69a35346d62","Type":"ContainerStarted","Data":"602a02fb0d936a4b07e3cef8faa136d035e90f1dc4c2e2cfccb3d49a3ed8a010"} Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.656466 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-8gmxj"] Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.664013 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-8gmxj"] Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.750379 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-r9kl7"] Nov 22 07:43:20 crc kubenswrapper[4929]: E1122 07:43:20.750816 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18461154-ba2d-496b-a4a8-0f14f91b64af" containerName="keystone-bootstrap" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.750841 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="18461154-ba2d-496b-a4a8-0f14f91b64af" containerName="keystone-bootstrap" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.751084 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="18461154-ba2d-496b-a4a8-0f14f91b64af" containerName="keystone-bootstrap" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.751875 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.756242 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.756518 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-jg2tk" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.757026 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.757374 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.757634 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.767077 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-r9kl7"] Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.919561 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-config-data\") pod \"keystone-bootstrap-r9kl7\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.919745 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-scripts\") pod \"keystone-bootstrap-r9kl7\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.919802 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-fernet-keys\") pod \"keystone-bootstrap-r9kl7\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.919843 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-combined-ca-bundle\") pod \"keystone-bootstrap-r9kl7\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.919871 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttnn7\" (UniqueName: \"kubernetes.io/projected/b387b4f1-8159-4237-9e71-5235ffc8ac5a-kube-api-access-ttnn7\") pod \"keystone-bootstrap-r9kl7\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:20 crc kubenswrapper[4929]: I1122 07:43:20.919903 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-credential-keys\") pod \"keystone-bootstrap-r9kl7\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.021753 4929 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-scripts\") pod \"keystone-bootstrap-r9kl7\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.021807 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-fernet-keys\") pod \"keystone-bootstrap-r9kl7\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.021838 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-combined-ca-bundle\") pod \"keystone-bootstrap-r9kl7\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.021862 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttnn7\" (UniqueName: \"kubernetes.io/projected/b387b4f1-8159-4237-9e71-5235ffc8ac5a-kube-api-access-ttnn7\") pod \"keystone-bootstrap-r9kl7\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.021894 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-credential-keys\") pod \"keystone-bootstrap-r9kl7\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.022006 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-config-data\") pod \"keystone-bootstrap-r9kl7\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.027854 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-config-data\") pod \"keystone-bootstrap-r9kl7\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.027897 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-credential-keys\") pod \"keystone-bootstrap-r9kl7\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.030412 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-combined-ca-bundle\") pod \"keystone-bootstrap-r9kl7\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.030764 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-scripts\") pod \"keystone-bootstrap-r9kl7\" (UID: 
\"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.035685 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-fernet-keys\") pod \"keystone-bootstrap-r9kl7\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.052112 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttnn7\" (UniqueName: \"kubernetes.io/projected/b387b4f1-8159-4237-9e71-5235ffc8ac5a-kube-api-access-ttnn7\") pod \"keystone-bootstrap-r9kl7\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.148553 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.494394 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"14af86c0-0c4b-46b7-b2c5-b69a35346d62","Type":"ContainerStarted","Data":"505f8289cb33466275e04ef73d37f8f6843e0df97442a2e8e700e2aa6c93b895"} Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.494811 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.497856 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-598887bdc4-mxhdk" event={"ID":"89f5e117-d76e-446c-a83a-09737f044c1f","Type":"ContainerStarted","Data":"453e798539b01813b6813b89f03b0b74fb79d7d3adc3616bf477691c690da79d"} Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.500469 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-58894b567d-khmvq" event={"ID":"d8f14a1c-f6bf-4a66-b839-393c7c34b932","Type":"ContainerStarted","Data":"3c617c63e1d36631fa8fb570ee233b26ecc134abc7eddb094bb7a0ebc55435a1"} Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.528623 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=16.528608427 podStartE2EDuration="16.528608427s" podCreationTimestamp="2025-11-22 07:43:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:43:21.516897585 +0000 UTC m=+1938.626351628" watchObservedRunningTime="2025-11-22 07:43:21.528608427 +0000 UTC m=+1938.638062440" Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.541849 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-598887bdc4-mxhdk" podStartSLOduration=36.15430516 podStartE2EDuration="56.541822848s" podCreationTimestamp="2025-11-22 07:42:25 +0000 UTC" firstStartedPulling="2025-11-22 07:42:58.72857855 +0000 UTC m=+1915.838032583" lastFinishedPulling="2025-11-22 07:43:19.116096258 +0000 UTC m=+1936.225550271" observedRunningTime="2025-11-22 07:43:21.537675824 +0000 UTC m=+1938.647129847" watchObservedRunningTime="2025-11-22 07:43:21.541822848 +0000 UTC m=+1938.651276881" Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.563341 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-58894b567d-khmvq" podStartSLOduration=36.288153256 podStartE2EDuration="56.563313135s" 
podCreationTimestamp="2025-11-22 07:42:25 +0000 UTC" firstStartedPulling="2025-11-22 07:42:58.77458195 +0000 UTC m=+1915.884035993" lastFinishedPulling="2025-11-22 07:43:19.049741859 +0000 UTC m=+1936.159195872" observedRunningTime="2025-11-22 07:43:21.559718885 +0000 UTC m=+1938.669172928" watchObservedRunningTime="2025-11-22 07:43:21.563313135 +0000 UTC m=+1938.672767168" Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.618583 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-r9kl7"] Nov 22 07:43:21 crc kubenswrapper[4929]: W1122 07:43:21.630672 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb387b4f1_8159_4237_9e71_5235ffc8ac5a.slice/crio-080cd57fe31968c936d790b505b8aced37f5637eabd884a73e7433b54922e1a9 WatchSource:0}: Error finding container 080cd57fe31968c936d790b505b8aced37f5637eabd884a73e7433b54922e1a9: Status 404 returned error can't find the container with id 080cd57fe31968c936d790b505b8aced37f5637eabd884a73e7433b54922e1a9 Nov 22 07:43:21 crc kubenswrapper[4929]: I1122 07:43:21.967903 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18461154-ba2d-496b-a4a8-0f14f91b64af" path="/var/lib/kubelet/pods/18461154-ba2d-496b-a4a8-0f14f91b64af/volumes" Nov 22 07:43:22 crc kubenswrapper[4929]: I1122 07:43:22.511120 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-r9kl7" event={"ID":"b387b4f1-8159-4237-9e71-5235ffc8ac5a","Type":"ContainerStarted","Data":"b3b3bcd0576f944531b8e744b5ec5673fed69095061d999c4072383acda50cc0"} Nov 22 07:43:22 crc kubenswrapper[4929]: I1122 07:43:22.511177 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-r9kl7" event={"ID":"b387b4f1-8159-4237-9e71-5235ffc8ac5a","Type":"ContainerStarted","Data":"080cd57fe31968c936d790b505b8aced37f5637eabd884a73e7433b54922e1a9"} Nov 22 07:43:23 crc kubenswrapper[4929]: I1122 07:43:23.475504 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Nov 22 07:43:23 crc kubenswrapper[4929]: I1122 07:43:23.546016 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-r9kl7" podStartSLOduration=3.545996213 podStartE2EDuration="3.545996213s" podCreationTimestamp="2025-11-22 07:43:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:43:23.543449819 +0000 UTC m=+1940.652903832" watchObservedRunningTime="2025-11-22 07:43:23.545996213 +0000 UTC m=+1940.655450226" Nov 22 07:43:25 crc kubenswrapper[4929]: I1122 07:43:25.694677 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Nov 22 07:43:25 crc kubenswrapper[4929]: I1122 07:43:25.695101 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 22 07:43:25 crc kubenswrapper[4929]: I1122 07:43:25.702481 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Nov 22 07:43:25 crc kubenswrapper[4929]: I1122 07:43:25.768307 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:43:25 crc kubenswrapper[4929]: I1122 07:43:25.769155 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:43:25 crc 
Nov 22 07:43:25 crc kubenswrapper[4929]: I1122 07:43:25.841845 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-58894b567d-khmvq"
Nov 22 07:43:25 crc kubenswrapper[4929]: I1122 07:43:25.841913 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-58894b567d-khmvq"
Nov 22 07:43:26 crc kubenswrapper[4929]: I1122 07:43:26.567292 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0"
Nov 22 07:43:26 crc kubenswrapper[4929]: I1122 07:43:26.947404 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1"
Nov 22 07:43:26 crc kubenswrapper[4929]: E1122 07:43:26.947624 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 07:43:30 crc kubenswrapper[4929]: E1122 07:43:30.090139 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-9cvcv" podUID="7e5572bd-b4bf-4476-9247-06d7c892dcf1"
Nov 22 07:43:30 crc kubenswrapper[4929]: I1122 07:43:30.349110 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"]
Nov 22 07:43:30 crc kubenswrapper[4929]: I1122 07:43:30.349377 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerName="watcher-api-log" containerID="cri-o://563836a5ede3a77d33f2d6d3b4b7365330d18888cf72c5732a3ead24c015fa98" gracePeriod=30
Nov 22 07:43:30 crc kubenswrapper[4929]: I1122 07:43:30.349448 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerName="watcher-api" containerID="cri-o://505f8289cb33466275e04ef73d37f8f6843e0df97442a2e8e700e2aa6c93b895" gracePeriod=30
Nov 22 07:43:31 crc kubenswrapper[4929]: I1122 07:43:31.613519 4929 generic.go:334] "Generic (PLEG): container finished" podID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerID="563836a5ede3a77d33f2d6d3b4b7365330d18888cf72c5732a3ead24c015fa98" exitCode=143
Nov 22 07:43:31 crc kubenswrapper[4929]: I1122 07:43:31.614064 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"14af86c0-0c4b-46b7-b2c5-b69a35346d62","Type":"ContainerDied","Data":"563836a5ede3a77d33f2d6d3b4b7365330d18888cf72c5732a3ead24c015fa98"}
Nov 22 07:43:33 crc kubenswrapper[4929]: I1122 07:43:33.634137 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"57774149-a18c-475d-ad44-23687ced2981","Type":"ContainerStarted","Data":"ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04"}
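Both back-off messages above (CrashLoopBackOff's "back-off 5m0s restarting failed container" and ImagePullBackOff's "Back-off pulling image") come from capped exponential back-off: the kubelet doubles its wait after each failure until it hits a ceiling, which the 5m0s in the message pins at five minutes here. A sketch of that schedule follows; the 10s base is an assumption, since only the 5m cap is visible in this log.

```go
// backoff.go — capped exponential back-off of the kind the messages above report.
package main

import (
	"fmt"
	"time"
)

func backoff(restarts int) time.Duration {
	const (
		base    = 10 * time.Second // assumed starting delay
		ceiling = 5 * time.Minute  // the "back-off 5m0s" visible in the log
	)
	d := base
	for i := 0; i < restarts; i++ {
		d *= 2
		if d >= ceiling {
			return ceiling
		}
	}
	return d
}

func main() {
	for r := 0; r <= 6; r++ {
		fmt.Printf("restart %d -> wait %s\n", r, backoff(r))
	}
	// From restart 5 on, the wait stays at 5m0s: the pod sits in
	// CrashLoopBackOff (or ImagePullBackOff) between attempts.
}
```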
event={"ID":"59621e9f-5111-473e-b99f-02b09934b5ad","Type":"ContainerStarted","Data":"bf9683a4cb4ff10a632572acd7c666e0d095c304d84db4d4a0b2a39ba0e4b7ac"} Nov 22 07:43:33 crc kubenswrapper[4929]: I1122 07:43:33.660171 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=3.766473509 podStartE2EDuration="1m18.660154302s" podCreationTimestamp="2025-11-22 07:42:15 +0000 UTC" firstStartedPulling="2025-11-22 07:42:17.512489545 +0000 UTC m=+1874.621943558" lastFinishedPulling="2025-11-22 07:43:32.406170348 +0000 UTC m=+1949.515624351" observedRunningTime="2025-11-22 07:43:33.656430782 +0000 UTC m=+1950.765884805" watchObservedRunningTime="2025-11-22 07:43:33.660154302 +0000 UTC m=+1950.769608315" Nov 22 07:43:33 crc kubenswrapper[4929]: I1122 07:43:33.788020 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.164:9322/\": read tcp 10.217.0.2:59910->10.217.0.164:9322: read: connection reset by peer" Nov 22 07:43:33 crc kubenswrapper[4929]: I1122 07:43:33.789718 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.164:9322/\": read tcp 10.217.0.2:59914->10.217.0.164:9322: read: connection reset by peer" Nov 22 07:43:34 crc kubenswrapper[4929]: I1122 07:43:34.646781 4929 generic.go:334] "Generic (PLEG): container finished" podID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerID="505f8289cb33466275e04ef73d37f8f6843e0df97442a2e8e700e2aa6c93b895" exitCode=0 Nov 22 07:43:34 crc kubenswrapper[4929]: I1122 07:43:34.646875 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"14af86c0-0c4b-46b7-b2c5-b69a35346d62","Type":"ContainerDied","Data":"505f8289cb33466275e04ef73d37f8f6843e0df97442a2e8e700e2aa6c93b895"} Nov 22 07:43:35 crc kubenswrapper[4929]: I1122 07:43:35.694752 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.164:9322/\": dial tcp 10.217.0.164:9322: connect: connection refused" Nov 22 07:43:35 crc kubenswrapper[4929]: I1122 07:43:35.694815 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.164:9322/\": dial tcp 10.217.0.164:9322: connect: connection refused" Nov 22 07:43:35 crc kubenswrapper[4929]: I1122 07:43:35.770125 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-598887bdc4-mxhdk" podUID="89f5e117-d76e-446c-a83a-09737f044c1f" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.162:8443: connect: connection refused" Nov 22 07:43:35 crc kubenswrapper[4929]: I1122 07:43:35.842982 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-58894b567d-khmvq" podUID="d8f14a1c-f6bf-4a66-b839-393c7c34b932" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.163:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.163:8443: connect: connection refused" Nov 22 
Nov 22 07:43:36 crc kubenswrapper[4929]: I1122 07:43:36.503829 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0"
Nov 22 07:43:36 crc kubenswrapper[4929]: I1122 07:43:36.536358 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0"
Nov 22 07:43:36 crc kubenswrapper[4929]: I1122 07:43:36.663999 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0"
Nov 22 07:43:36 crc kubenswrapper[4929]: I1122 07:43:36.689560 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0"
Nov 22 07:43:36 crc kubenswrapper[4929]: I1122 07:43:36.724756 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"]
Nov 22 07:43:38 crc kubenswrapper[4929]: I1122 07:43:38.680812 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-decision-engine-0" podUID="57774149-a18c-475d-ad44-23687ced2981" containerName="watcher-decision-engine" containerID="cri-o://ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04" gracePeriod=30
Nov 22 07:43:39 crc kubenswrapper[4929]: I1122 07:43:39.692564 4929 generic.go:334] "Generic (PLEG): container finished" podID="b387b4f1-8159-4237-9e71-5235ffc8ac5a" containerID="b3b3bcd0576f944531b8e744b5ec5673fed69095061d999c4072383acda50cc0" exitCode=0
Nov 22 07:43:39 crc kubenswrapper[4929]: I1122 07:43:39.692648 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-r9kl7" event={"ID":"b387b4f1-8159-4237-9e71-5235ffc8ac5a","Type":"ContainerDied","Data":"b3b3bcd0576f944531b8e744b5ec5673fed69095061d999c4072383acda50cc0"}
Nov 22 07:43:41 crc kubenswrapper[4929]: I1122 07:43:41.947528 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1"
Nov 22 07:43:41 crc kubenswrapper[4929]: E1122 07:43:41.948045 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 07:43:45 crc kubenswrapper[4929]: I1122 07:43:45.695404 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.164:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 22 07:43:45 crc kubenswrapper[4929]: I1122 07:43:45.695533 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.164:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 22 07:43:45 crc kubenswrapper[4929]: I1122 07:43:45.696506 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0"
Nov 22 07:43:45 crc kubenswrapper[4929]: I1122 07:43:45.696556 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0"
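The three readiness-probe failure shapes in this stretch of log are all ordinary Go net/http client errors: "connection reset by peer" (the server died mid-request), "connect: connection refused" (nothing listening), and "context deadline exceeded (Client.Timeout exceeded while awaiting headers)" (no response headers before the client's deadline). A minimal HTTP probe in the same spirit follows; the URL and 1-second timeout are stand-ins, not kubelet configuration.

```go
// readiness_probe.go — a bare-bones HTTP readiness check that fails in the
// same ways the prober.go records above do.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func probe(url string) error {
	client := &http.Client{Timeout: 1 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return err // connection refused, reset by peer, or Client.Timeout exceeded
	}
	defer resp.Body.Close()
	// Kubernetes HTTP probes treat any status in [200, 400) as success.
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("probe failed: status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	// Against a pod that is stopping, this yields exactly the failures logged above.
	fmt.Println(probe("http://10.217.0.164:9322/"))
}
```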
Nov 22 07:43:46 crc kubenswrapper[4929]: E1122 07:43:46.505878 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"]
Nov 22 07:43:46 crc kubenswrapper[4929]: E1122 07:43:46.508655 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"]
Nov 22 07:43:46 crc kubenswrapper[4929]: E1122 07:43:46.509759 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"]
Nov 22 07:43:46 crc kubenswrapper[4929]: E1122 07:43:46.509800 4929 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/watcher-decision-engine-0" podUID="57774149-a18c-475d-ad44-23687ced2981" containerName="watcher-decision-engine"
Nov 22 07:43:49 crc kubenswrapper[4929]: I1122 07:43:49.229308 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-598887bdc4-mxhdk"
Nov 22 07:43:49 crc kubenswrapper[4929]: I1122 07:43:49.261860 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-58894b567d-khmvq"
Nov 22 07:43:50 crc kubenswrapper[4929]: I1122 07:43:50.696627 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.164:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 22 07:43:50 crc kubenswrapper[4929]: I1122 07:43:50.696675 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.164:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 22 07:43:50 crc kubenswrapper[4929]: I1122 07:43:50.906459 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-598887bdc4-mxhdk"
Nov 22 07:43:50 crc kubenswrapper[4929]: I1122 07:43:50.995611 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-58894b567d-khmvq"
Nov 22 07:43:51 crc kubenswrapper[4929]: I1122 07:43:51.049180 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-598887bdc4-mxhdk"]
Nov 22 07:43:51 crc kubenswrapper[4929]: I1122 07:43:51.827593 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-598887bdc4-mxhdk" podUID="89f5e117-d76e-446c-a83a-09737f044c1f" containerName="horizon-log" containerID="cri-o://1e5846a8a7fbe857567a3cc6533613194b998fe91636f70477ee443a5d74fea8" gracePeriod=30 Nov 22
07:43:51 crc kubenswrapper[4929]: I1122 07:43:51.828295 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-598887bdc4-mxhdk" podUID="89f5e117-d76e-446c-a83a-09737f044c1f" containerName="horizon" containerID="cri-o://453e798539b01813b6813b89f03b0b74fb79d7d3adc3616bf477691c690da79d" gracePeriod=30 Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.660318 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.697481 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.164:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.697584 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.164:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.768418 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-598887bdc4-mxhdk" podUID="89f5e117-d76e-446c-a83a-09737f044c1f" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.162:8443: connect: connection refused" Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.814640 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/14af86c0-0c4b-46b7-b2c5-b69a35346d62-custom-prometheus-ca\") pod \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.814776 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/14af86c0-0c4b-46b7-b2c5-b69a35346d62-logs\") pod \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.814818 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14af86c0-0c4b-46b7-b2c5-b69a35346d62-config-data\") pod \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.814876 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ct94w\" (UniqueName: \"kubernetes.io/projected/14af86c0-0c4b-46b7-b2c5-b69a35346d62-kube-api-access-ct94w\") pod \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.814924 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14af86c0-0c4b-46b7-b2c5-b69a35346d62-combined-ca-bundle\") pod \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\" (UID: \"14af86c0-0c4b-46b7-b2c5-b69a35346d62\") " Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.815354 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/14af86c0-0c4b-46b7-b2c5-b69a35346d62-logs" (OuterVolumeSpecName: "logs") pod "14af86c0-0c4b-46b7-b2c5-b69a35346d62" (UID: "14af86c0-0c4b-46b7-b2c5-b69a35346d62"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.832894 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14af86c0-0c4b-46b7-b2c5-b69a35346d62-kube-api-access-ct94w" (OuterVolumeSpecName: "kube-api-access-ct94w") pod "14af86c0-0c4b-46b7-b2c5-b69a35346d62" (UID: "14af86c0-0c4b-46b7-b2c5-b69a35346d62"). InnerVolumeSpecName "kube-api-access-ct94w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.853749 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14af86c0-0c4b-46b7-b2c5-b69a35346d62-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "14af86c0-0c4b-46b7-b2c5-b69a35346d62" (UID: "14af86c0-0c4b-46b7-b2c5-b69a35346d62"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.859079 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14af86c0-0c4b-46b7-b2c5-b69a35346d62-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "14af86c0-0c4b-46b7-b2c5-b69a35346d62" (UID: "14af86c0-0c4b-46b7-b2c5-b69a35346d62"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.873446 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14af86c0-0c4b-46b7-b2c5-b69a35346d62-config-data" (OuterVolumeSpecName: "config-data") pod "14af86c0-0c4b-46b7-b2c5-b69a35346d62" (UID: "14af86c0-0c4b-46b7-b2c5-b69a35346d62"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.877827 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"14af86c0-0c4b-46b7-b2c5-b69a35346d62","Type":"ContainerDied","Data":"602a02fb0d936a4b07e3cef8faa136d035e90f1dc4c2e2cfccb3d49a3ed8a010"} Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.877895 4929 scope.go:117] "RemoveContainer" containerID="505f8289cb33466275e04ef73d37f8f6843e0df97442a2e8e700e2aa6c93b895" Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.878063 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.917652 4929 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/14af86c0-0c4b-46b7-b2c5-b69a35346d62-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.917693 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/14af86c0-0c4b-46b7-b2c5-b69a35346d62-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.917706 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14af86c0-0c4b-46b7-b2c5-b69a35346d62-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.917718 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ct94w\" (UniqueName: \"kubernetes.io/projected/14af86c0-0c4b-46b7-b2c5-b69a35346d62-kube-api-access-ct94w\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.917731 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14af86c0-0c4b-46b7-b2c5-b69a35346d62-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.981563 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Nov 22 07:43:55 crc kubenswrapper[4929]: I1122 07:43:55.991346 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-api-0"] Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.025136 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Nov 22 07:43:56 crc kubenswrapper[4929]: E1122 07:43:56.025640 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerName="watcher-api" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.025665 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerName="watcher-api" Nov 22 07:43:56 crc kubenswrapper[4929]: E1122 07:43:56.025698 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerName="watcher-api-log" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.025708 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerName="watcher-api-log" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.025934 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerName="watcher-api" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.025963 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" containerName="watcher-api-log" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.027181 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.036076 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-public-svc" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.036256 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.039894 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-internal-svc" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.070548 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.121190 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5143f7c3-b064-4290-96a9-fb164c215793-logs\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.121268 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5143f7c3-b064-4290-96a9-fb164c215793-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.121341 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5143f7c3-b064-4290-96a9-fb164c215793-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.121377 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5143f7c3-b064-4290-96a9-fb164c215793-config-data\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.121458 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5lg8\" (UniqueName: \"kubernetes.io/projected/5143f7c3-b064-4290-96a9-fb164c215793-kube-api-access-g5lg8\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.121488 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5143f7c3-b064-4290-96a9-fb164c215793-public-tls-certs\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.121522 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/5143f7c3-b064-4290-96a9-fb164c215793-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.223474 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/5143f7c3-b064-4290-96a9-fb164c215793-logs\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.223537 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5143f7c3-b064-4290-96a9-fb164c215793-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.223602 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5143f7c3-b064-4290-96a9-fb164c215793-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.223635 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5143f7c3-b064-4290-96a9-fb164c215793-config-data\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.223678 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5lg8\" (UniqueName: \"kubernetes.io/projected/5143f7c3-b064-4290-96a9-fb164c215793-kube-api-access-g5lg8\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.223722 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5143f7c3-b064-4290-96a9-fb164c215793-public-tls-certs\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.223758 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/5143f7c3-b064-4290-96a9-fb164c215793-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.224579 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5143f7c3-b064-4290-96a9-fb164c215793-logs\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.231344 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5143f7c3-b064-4290-96a9-fb164c215793-config-data\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.236031 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5143f7c3-b064-4290-96a9-fb164c215793-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.236936 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/5143f7c3-b064-4290-96a9-fb164c215793-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.237944 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5143f7c3-b064-4290-96a9-fb164c215793-public-tls-certs\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.244896 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/5143f7c3-b064-4290-96a9-fb164c215793-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.251000 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5lg8\" (UniqueName: \"kubernetes.io/projected/5143f7c3-b064-4290-96a9-fb164c215793-kube-api-access-g5lg8\") pod \"watcher-api-0\" (UID: \"5143f7c3-b064-4290-96a9-fb164c215793\") " pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.348600 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.889641 4929 generic.go:334] "Generic (PLEG): container finished" podID="89f5e117-d76e-446c-a83a-09737f044c1f" containerID="453e798539b01813b6813b89f03b0b74fb79d7d3adc3616bf477691c690da79d" exitCode=0 Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.889723 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-598887bdc4-mxhdk" event={"ID":"89f5e117-d76e-446c-a83a-09737f044c1f","Type":"ContainerDied","Data":"453e798539b01813b6813b89f03b0b74fb79d7d3adc3616bf477691c690da79d"} Nov 22 07:43:56 crc kubenswrapper[4929]: I1122 07:43:56.948824 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" Nov 22 07:43:56 crc kubenswrapper[4929]: E1122 07:43:56.949076 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:43:57 crc kubenswrapper[4929]: I1122 07:43:57.960828 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14af86c0-0c4b-46b7-b2c5-b69a35346d62" path="/var/lib/kubelet/pods/14af86c0-0c4b-46b7-b2c5-b69a35346d62/volumes" Nov 22 07:44:00 crc kubenswrapper[4929]: I1122 07:44:00.772105 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:44:00 crc kubenswrapper[4929]: I1122 07:44:00.908625 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-credential-keys\") pod \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " Nov 22 07:44:00 crc kubenswrapper[4929]: I1122 07:44:00.909041 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-combined-ca-bundle\") pod \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " Nov 22 07:44:00 crc kubenswrapper[4929]: I1122 07:44:00.909138 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-scripts\") pod \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " Nov 22 07:44:00 crc kubenswrapper[4929]: I1122 07:44:00.909237 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttnn7\" (UniqueName: \"kubernetes.io/projected/b387b4f1-8159-4237-9e71-5235ffc8ac5a-kube-api-access-ttnn7\") pod \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " Nov 22 07:44:00 crc kubenswrapper[4929]: I1122 07:44:00.909275 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-config-data\") pod \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " Nov 22 07:44:00 crc kubenswrapper[4929]: I1122 07:44:00.909387 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-fernet-keys\") pod \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\" (UID: \"b387b4f1-8159-4237-9e71-5235ffc8ac5a\") " Nov 22 07:44:00 crc kubenswrapper[4929]: I1122 07:44:00.914908 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "b387b4f1-8159-4237-9e71-5235ffc8ac5a" (UID: "b387b4f1-8159-4237-9e71-5235ffc8ac5a"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:44:00 crc kubenswrapper[4929]: I1122 07:44:00.915384 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b387b4f1-8159-4237-9e71-5235ffc8ac5a" (UID: "b387b4f1-8159-4237-9e71-5235ffc8ac5a"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:44:00 crc kubenswrapper[4929]: I1122 07:44:00.915529 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b387b4f1-8159-4237-9e71-5235ffc8ac5a-kube-api-access-ttnn7" (OuterVolumeSpecName: "kube-api-access-ttnn7") pod "b387b4f1-8159-4237-9e71-5235ffc8ac5a" (UID: "b387b4f1-8159-4237-9e71-5235ffc8ac5a"). InnerVolumeSpecName "kube-api-access-ttnn7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:44:00 crc kubenswrapper[4929]: I1122 07:44:00.925371 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-scripts" (OuterVolumeSpecName: "scripts") pod "b387b4f1-8159-4237-9e71-5235ffc8ac5a" (UID: "b387b4f1-8159-4237-9e71-5235ffc8ac5a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:44:00 crc kubenswrapper[4929]: I1122 07:44:00.953494 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-r9kl7" event={"ID":"b387b4f1-8159-4237-9e71-5235ffc8ac5a","Type":"ContainerDied","Data":"080cd57fe31968c936d790b505b8aced37f5637eabd884a73e7433b54922e1a9"} Nov 22 07:44:00 crc kubenswrapper[4929]: I1122 07:44:00.953545 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="080cd57fe31968c936d790b505b8aced37f5637eabd884a73e7433b54922e1a9" Nov 22 07:44:00 crc kubenswrapper[4929]: I1122 07:44:00.953552 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-r9kl7" Nov 22 07:44:00 crc kubenswrapper[4929]: I1122 07:44:00.954466 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-config-data" (OuterVolumeSpecName: "config-data") pod "b387b4f1-8159-4237-9e71-5235ffc8ac5a" (UID: "b387b4f1-8159-4237-9e71-5235ffc8ac5a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:44:00 crc kubenswrapper[4929]: I1122 07:44:00.959862 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b387b4f1-8159-4237-9e71-5235ffc8ac5a" (UID: "b387b4f1-8159-4237-9e71-5235ffc8ac5a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:44:01 crc kubenswrapper[4929]: I1122 07:44:01.011998 4929 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 22 07:44:01 crc kubenswrapper[4929]: I1122 07:44:01.012046 4929 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 22 07:44:01 crc kubenswrapper[4929]: I1122 07:44:01.012061 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:44:01 crc kubenswrapper[4929]: I1122 07:44:01.012074 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:44:01 crc kubenswrapper[4929]: I1122 07:44:01.012086 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttnn7\" (UniqueName: \"kubernetes.io/projected/b387b4f1-8159-4237-9e71-5235ffc8ac5a-kube-api-access-ttnn7\") on node \"crc\" DevicePath \"\"" Nov 22 07:44:01 crc kubenswrapper[4929]: I1122 07:44:01.012098 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b387b4f1-8159-4237-9e71-5235ffc8ac5a-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:44:01 crc kubenswrapper[4929]: I1122 07:44:01.970669 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-5fd7f9f4fc-7jzwk"] Nov 22 07:44:01 crc kubenswrapper[4929]: E1122 07:44:01.971160 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b387b4f1-8159-4237-9e71-5235ffc8ac5a" containerName="keystone-bootstrap" Nov 22 07:44:01 crc kubenswrapper[4929]: I1122 07:44:01.971179 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="b387b4f1-8159-4237-9e71-5235ffc8ac5a" containerName="keystone-bootstrap" Nov 22 07:44:01 crc kubenswrapper[4929]: I1122 07:44:01.971441 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="b387b4f1-8159-4237-9e71-5235ffc8ac5a" containerName="keystone-bootstrap" Nov 22 07:44:01 crc kubenswrapper[4929]: I1122 07:44:01.972359 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:01 crc kubenswrapper[4929]: I1122 07:44:01.975282 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 22 07:44:01 crc kubenswrapper[4929]: I1122 07:44:01.975468 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 22 07:44:01 crc kubenswrapper[4929]: I1122 07:44:01.977592 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 22 07:44:01 crc kubenswrapper[4929]: I1122 07:44:01.978490 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-jg2tk" Nov 22 07:44:01 crc kubenswrapper[4929]: I1122 07:44:01.978667 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 22 07:44:01 crc kubenswrapper[4929]: I1122 07:44:01.984819 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5fd7f9f4fc-7jzwk"] Nov 22 07:44:01 crc kubenswrapper[4929]: I1122 07:44:01.994495 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.139722 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-combined-ca-bundle\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.140156 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-credential-keys\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.140186 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-internal-tls-certs\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.140224 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-fernet-keys\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.140242 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lx67t\" (UniqueName: \"kubernetes.io/projected/4320c60d-8c47-49d1-9fae-6118bfb228dc-kube-api-access-lx67t\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.140308 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-public-tls-certs\") pod 
\"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.140358 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-scripts\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.140605 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-config-data\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.242190 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-internal-tls-certs\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.242266 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-fernet-keys\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.242285 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lx67t\" (UniqueName: \"kubernetes.io/projected/4320c60d-8c47-49d1-9fae-6118bfb228dc-kube-api-access-lx67t\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.242313 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-public-tls-certs\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.242365 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-scripts\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.242414 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-config-data\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.242444 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-combined-ca-bundle\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " 
pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.242490 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-credential-keys\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.246799 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-public-tls-certs\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.247022 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-combined-ca-bundle\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.247864 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-fernet-keys\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.249063 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-credential-keys\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.249449 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-scripts\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.249974 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-config-data\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.250059 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4320c60d-8c47-49d1-9fae-6118bfb228dc-internal-tls-certs\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.263568 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lx67t\" (UniqueName: \"kubernetes.io/projected/4320c60d-8c47-49d1-9fae-6118bfb228dc-kube-api-access-lx67t\") pod \"keystone-5fd7f9f4fc-7jzwk\" (UID: \"4320c60d-8c47-49d1-9fae-6118bfb228dc\") " pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:02 crc kubenswrapper[4929]: I1122 07:44:02.342176 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:05 crc kubenswrapper[4929]: I1122 07:44:05.768225 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-598887bdc4-mxhdk" podUID="89f5e117-d76e-446c-a83a-09737f044c1f" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.162:8443: connect: connection refused" Nov 22 07:44:10 crc kubenswrapper[4929]: I1122 07:44:10.947549 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" Nov 22 07:44:10 crc kubenswrapper[4929]: E1122 07:44:10.948046 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:44:15 crc kubenswrapper[4929]: I1122 07:44:15.768396 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-598887bdc4-mxhdk" podUID="89f5e117-d76e-446c-a83a-09737f044c1f" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.162:8443: connect: connection refused" Nov 22 07:44:15 crc kubenswrapper[4929]: I1122 07:44:15.768879 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:44:16 crc kubenswrapper[4929]: E1122 07:44:16.504611 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04 is running failed: container process not found" containerID="ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Nov 22 07:44:16 crc kubenswrapper[4929]: E1122 07:44:16.505183 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04 is running failed: container process not found" containerID="ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Nov 22 07:44:16 crc kubenswrapper[4929]: E1122 07:44:16.505784 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04 is running failed: container process not found" containerID="ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Nov 22 07:44:16 crc kubenswrapper[4929]: E1122 07:44:16.505838 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04 is running failed: container process not found" probeType="Readiness" pod="openstack/watcher-decision-engine-0" podUID="57774149-a18c-475d-ad44-23687ced2981" 
containerName="watcher-decision-engine" Nov 22 07:44:19 crc kubenswrapper[4929]: I1122 07:44:19.148104 4929 generic.go:334] "Generic (PLEG): container finished" podID="57774149-a18c-475d-ad44-23687ced2981" containerID="ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04" exitCode=137 Nov 22 07:44:19 crc kubenswrapper[4929]: I1122 07:44:19.148154 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"57774149-a18c-475d-ad44-23687ced2981","Type":"ContainerDied","Data":"ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04"} Nov 22 07:44:22 crc kubenswrapper[4929]: I1122 07:44:22.191511 4929 generic.go:334] "Generic (PLEG): container finished" podID="89f5e117-d76e-446c-a83a-09737f044c1f" containerID="1e5846a8a7fbe857567a3cc6533613194b998fe91636f70477ee443a5d74fea8" exitCode=137 Nov 22 07:44:22 crc kubenswrapper[4929]: I1122 07:44:22.193174 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-598887bdc4-mxhdk" event={"ID":"89f5e117-d76e-446c-a83a-09737f044c1f","Type":"ContainerDied","Data":"1e5846a8a7fbe857567a3cc6533613194b998fe91636f70477ee443a5d74fea8"} Nov 22 07:44:25 crc kubenswrapper[4929]: I1122 07:44:25.768843 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-598887bdc4-mxhdk" podUID="89f5e117-d76e-446c-a83a-09737f044c1f" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.162:8443: connect: connection refused" Nov 22 07:44:25 crc kubenswrapper[4929]: I1122 07:44:25.947523 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1" Nov 22 07:44:35 crc kubenswrapper[4929]: I1122 07:44:35.768727 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-598887bdc4-mxhdk" podUID="89f5e117-d76e-446c-a83a-09737f044c1f" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.162:8443: connect: connection refused" Nov 22 07:44:45 crc kubenswrapper[4929]: I1122 07:44:45.769705 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-598887bdc4-mxhdk" podUID="89f5e117-d76e-446c-a83a-09737f044c1f" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.162:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.162:8443: connect: connection refused" Nov 22 07:44:46 crc kubenswrapper[4929]: E1122 07:44:46.504282 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04 is running failed: container process not found" containerID="ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Nov 22 07:44:46 crc kubenswrapper[4929]: E1122 07:44:46.504870 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04 is running failed: container process not found" containerID="ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Nov 22 07:44:46 crc kubenswrapper[4929]: E1122 07:44:46.505456 4929 
log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04 is running failed: container process not found" containerID="ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04" cmd=["/usr/bin/pgrep","-f","-r","DRST","watcher-decision-engine"] Nov 22 07:44:46 crc kubenswrapper[4929]: E1122 07:44:46.505555 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04 is running failed: container process not found" probeType="Readiness" pod="openstack/watcher-decision-engine-0" podUID="57774149-a18c-475d-ad44-23687ced2981" containerName="watcher-decision-engine" Nov 22 07:44:52 crc kubenswrapper[4929]: E1122 07:44:52.803647 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/sg-core:latest" Nov 22 07:44:52 crc kubenswrapper[4929]: E1122 07:44:52.804315 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:sg-core,Image:quay.io/openstack-k8s-operators/sg-core:latest,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:sg-core-conf-yaml,ReadOnly:false,MountPath:/etc/sg-core.conf.yaml,SubPath:sg-core.conf.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4wj44,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(59621e9f-5111-473e-b99f-02b09934b5ad): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:44:52 crc kubenswrapper[4929]: I1122 07:44:52.807580 4929 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 07:44:52 crc kubenswrapper[4929]: I1122 07:44:52.915462 4929 scope.go:117] "RemoveContainer" containerID="563836a5ede3a77d33f2d6d3b4b7365330d18888cf72c5732a3ead24c015fa98" Nov 22 07:44:52 crc kubenswrapper[4929]: E1122 07:44:52.918690 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 22 07:44:52 crc kubenswrapper[4929]: E1122 07:44:52.918841 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c 
barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xjgpl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-9cvcv_openstack(7e5572bd-b4bf-4476-9247-06d7c892dcf1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:44:52 crc kubenswrapper[4929]: E1122 07:44:52.919941 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-9cvcv" podUID="7e5572bd-b4bf-4476-9247-06d7c892dcf1" Nov 22 07:44:53 crc kubenswrapper[4929]: E1122 07:44:53.076810 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 22 07:44:53 crc kubenswrapper[4929]: E1122 07:44:53.077248 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dc4mg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-r6lzk_openstack(4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:44:53 crc kubenswrapper[4929]: E1122 07:44:53.079537 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-r6lzk" podUID="4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.149926 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.284356 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57774149-a18c-475d-ad44-23687ced2981-config-data\") pod \"57774149-a18c-475d-ad44-23687ced2981\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.285525 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-578qq\" (UniqueName: \"kubernetes.io/projected/57774149-a18c-475d-ad44-23687ced2981-kube-api-access-578qq\") pod \"57774149-a18c-475d-ad44-23687ced2981\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.285606 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57774149-a18c-475d-ad44-23687ced2981-combined-ca-bundle\") pod \"57774149-a18c-475d-ad44-23687ced2981\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.285702 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/57774149-a18c-475d-ad44-23687ced2981-custom-prometheus-ca\") pod \"57774149-a18c-475d-ad44-23687ced2981\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.285805 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57774149-a18c-475d-ad44-23687ced2981-logs\") pod \"57774149-a18c-475d-ad44-23687ced2981\" (UID: \"57774149-a18c-475d-ad44-23687ced2981\") " Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.285885 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.286858 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57774149-a18c-475d-ad44-23687ced2981-logs" (OuterVolumeSpecName: "logs") pod "57774149-a18c-475d-ad44-23687ced2981" (UID: "57774149-a18c-475d-ad44-23687ced2981"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.296073 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57774149-a18c-475d-ad44-23687ced2981-kube-api-access-578qq" (OuterVolumeSpecName: "kube-api-access-578qq") pod "57774149-a18c-475d-ad44-23687ced2981" (UID: "57774149-a18c-475d-ad44-23687ced2981"). InnerVolumeSpecName "kube-api-access-578qq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.328005 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57774149-a18c-475d-ad44-23687ced2981-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "57774149-a18c-475d-ad44-23687ced2981" (UID: "57774149-a18c-475d-ad44-23687ced2981"). InnerVolumeSpecName "custom-prometheus-ca". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.334799 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57774149-a18c-475d-ad44-23687ced2981-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "57774149-a18c-475d-ad44-23687ced2981" (UID: "57774149-a18c-475d-ad44-23687ced2981"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.342955 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57774149-a18c-475d-ad44-23687ced2981-config-data" (OuterVolumeSpecName: "config-data") pod "57774149-a18c-475d-ad44-23687ced2981" (UID: "57774149-a18c-475d-ad44-23687ced2981"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.387943 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/89f5e117-d76e-446c-a83a-09737f044c1f-scripts\") pod \"89f5e117-d76e-446c-a83a-09737f044c1f\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.387992 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89f5e117-d76e-446c-a83a-09737f044c1f-combined-ca-bundle\") pod \"89f5e117-d76e-446c-a83a-09737f044c1f\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.388044 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/89f5e117-d76e-446c-a83a-09737f044c1f-horizon-secret-key\") pod \"89f5e117-d76e-446c-a83a-09737f044c1f\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.388139 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5q55f\" (UniqueName: \"kubernetes.io/projected/89f5e117-d76e-446c-a83a-09737f044c1f-kube-api-access-5q55f\") pod \"89f5e117-d76e-446c-a83a-09737f044c1f\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.388221 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/89f5e117-d76e-446c-a83a-09737f044c1f-config-data\") pod \"89f5e117-d76e-446c-a83a-09737f044c1f\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.388239 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/89f5e117-d76e-446c-a83a-09737f044c1f-horizon-tls-certs\") pod \"89f5e117-d76e-446c-a83a-09737f044c1f\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.388300 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89f5e117-d76e-446c-a83a-09737f044c1f-logs\") pod \"89f5e117-d76e-446c-a83a-09737f044c1f\" (UID: \"89f5e117-d76e-446c-a83a-09737f044c1f\") " Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.388675 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-578qq\" (UniqueName: 
\"kubernetes.io/projected/57774149-a18c-475d-ad44-23687ced2981-kube-api-access-578qq\") on node \"crc\" DevicePath \"\"" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.388687 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57774149-a18c-475d-ad44-23687ced2981-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.388695 4929 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/57774149-a18c-475d-ad44-23687ced2981-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.388704 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57774149-a18c-475d-ad44-23687ced2981-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.388714 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57774149-a18c-475d-ad44-23687ced2981-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.389071 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89f5e117-d76e-446c-a83a-09737f044c1f-logs" (OuterVolumeSpecName: "logs") pod "89f5e117-d76e-446c-a83a-09737f044c1f" (UID: "89f5e117-d76e-446c-a83a-09737f044c1f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.393387 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89f5e117-d76e-446c-a83a-09737f044c1f-kube-api-access-5q55f" (OuterVolumeSpecName: "kube-api-access-5q55f") pod "89f5e117-d76e-446c-a83a-09737f044c1f" (UID: "89f5e117-d76e-446c-a83a-09737f044c1f"). InnerVolumeSpecName "kube-api-access-5q55f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.393861 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89f5e117-d76e-446c-a83a-09737f044c1f-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "89f5e117-d76e-446c-a83a-09737f044c1f" (UID: "89f5e117-d76e-446c-a83a-09737f044c1f"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.412623 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89f5e117-d76e-446c-a83a-09737f044c1f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "89f5e117-d76e-446c-a83a-09737f044c1f" (UID: "89f5e117-d76e-446c-a83a-09737f044c1f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.421789 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89f5e117-d76e-446c-a83a-09737f044c1f-scripts" (OuterVolumeSpecName: "scripts") pod "89f5e117-d76e-446c-a83a-09737f044c1f" (UID: "89f5e117-d76e-446c-a83a-09737f044c1f"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.427904 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89f5e117-d76e-446c-a83a-09737f044c1f-config-data" (OuterVolumeSpecName: "config-data") pod "89f5e117-d76e-446c-a83a-09737f044c1f" (UID: "89f5e117-d76e-446c-a83a-09737f044c1f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.435090 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89f5e117-d76e-446c-a83a-09737f044c1f-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "89f5e117-d76e-446c-a83a-09737f044c1f" (UID: "89f5e117-d76e-446c-a83a-09737f044c1f"). InnerVolumeSpecName "horizon-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.490267 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/89f5e117-d76e-446c-a83a-09737f044c1f-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.490301 4929 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/89f5e117-d76e-446c-a83a-09737f044c1f-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.490312 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89f5e117-d76e-446c-a83a-09737f044c1f-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.490320 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/89f5e117-d76e-446c-a83a-09737f044c1f-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.490329 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89f5e117-d76e-446c-a83a-09737f044c1f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.490337 4929 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/89f5e117-d76e-446c-a83a-09737f044c1f-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.490345 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5q55f\" (UniqueName: \"kubernetes.io/projected/89f5e117-d76e-446c-a83a-09737f044c1f-kube-api-access-5q55f\") on node \"crc\" DevicePath \"\"" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.557686 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Nov 22 07:44:53 crc kubenswrapper[4929]: W1122 07:44:53.559707 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5143f7c3_b064_4290_96a9_fb164c215793.slice/crio-e07990f364b2cefedb72bc0e6d72d1f2b664f0fbf771843b6fd6b6cdc9ec3f58 WatchSource:0}: Error finding container e07990f364b2cefedb72bc0e6d72d1f2b664f0fbf771843b6fd6b6cdc9ec3f58: Status 404 returned error can't find the container with id e07990f364b2cefedb72bc0e6d72d1f2b664f0fbf771843b6fd6b6cdc9ec3f58 Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.567095 4929 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"da6298c3cdc9e063ad233859e80f7899c9d155db55931a04a3aa81f018fc8e1e"} Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.572466 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-rsm44" event={"ID":"155c1bdd-4b26-4059-8ab7-7a6299bc17c9","Type":"ContainerStarted","Data":"4c8d130ba2bfc229564add643c163fd3b2720216177366f184bd57174725d9b2"} Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.573934 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.573923 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"57774149-a18c-475d-ad44-23687ced2981","Type":"ContainerDied","Data":"1b40cb180d5c57abdfbd8cfbea1debedca0e4975c4e43d26cf5a212170d3fba4"} Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.574107 4929 scope.go:117] "RemoveContainer" containerID="ca32da327cd1ce0eae295820c06e29e5597e99e7fd6cc92799005f33326f0e04" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.576718 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-598887bdc4-mxhdk" event={"ID":"89f5e117-d76e-446c-a83a-09737f044c1f","Type":"ContainerDied","Data":"51fbd5d9a4b00913f47b4d25f22cc09ebafceadb61dda510125a727434f688f0"} Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.577179 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-598887bdc4-mxhdk" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.611776 4929 scope.go:117] "RemoveContainer" containerID="453e798539b01813b6813b89f03b0b74fb79d7d3adc3616bf477691c690da79d" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.615657 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-598887bdc4-mxhdk"] Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.654811 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-598887bdc4-mxhdk"] Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.662500 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.667468 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.675808 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5fd7f9f4fc-7jzwk"] Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.682454 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 22 07:44:53 crc kubenswrapper[4929]: E1122 07:44:53.682883 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89f5e117-d76e-446c-a83a-09737f044c1f" containerName="horizon" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.682958 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="89f5e117-d76e-446c-a83a-09737f044c1f" containerName="horizon" Nov 22 07:44:53 crc kubenswrapper[4929]: E1122 07:44:53.683021 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57774149-a18c-475d-ad44-23687ced2981" containerName="watcher-decision-engine" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.683072 4929 
state_mem.go:107] "Deleted CPUSet assignment" podUID="57774149-a18c-475d-ad44-23687ced2981" containerName="watcher-decision-engine" Nov 22 07:44:53 crc kubenswrapper[4929]: E1122 07:44:53.683140 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89f5e117-d76e-446c-a83a-09737f044c1f" containerName="horizon-log" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.683190 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="89f5e117-d76e-446c-a83a-09737f044c1f" containerName="horizon-log" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.683650 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="89f5e117-d76e-446c-a83a-09737f044c1f" containerName="horizon" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.683735 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="57774149-a18c-475d-ad44-23687ced2981" containerName="watcher-decision-engine" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.683801 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="89f5e117-d76e-446c-a83a-09737f044c1f" containerName="horizon-log" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.684518 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.686631 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.694032 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.792378 4929 scope.go:117] "RemoveContainer" containerID="1e5846a8a7fbe857567a3cc6533613194b998fe91636f70477ee443a5d74fea8" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.794439 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a6b33a22-ad43-4a85-bcb7-8345455b171c-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.794606 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6b33a22-ad43-4a85-bcb7-8345455b171c-config-data\") pod \"watcher-decision-engine-0\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.794976 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mx5qr\" (UniqueName: \"kubernetes.io/projected/a6b33a22-ad43-4a85-bcb7-8345455b171c-kube-api-access-mx5qr\") pod \"watcher-decision-engine-0\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.795115 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a6b33a22-ad43-4a85-bcb7-8345455b171c-logs\") pod \"watcher-decision-engine-0\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.795278 4929 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6b33a22-ad43-4a85-bcb7-8345455b171c-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:44:53 crc kubenswrapper[4929]: W1122 07:44:53.797411 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4320c60d_8c47_49d1_9fae_6118bfb228dc.slice/crio-b2fa4da5ddbcb16a16bfd829ba507d78cf4283bb7f64455b17b32320c80c61ff WatchSource:0}: Error finding container b2fa4da5ddbcb16a16bfd829ba507d78cf4283bb7f64455b17b32320c80c61ff: Status 404 returned error can't find the container with id b2fa4da5ddbcb16a16bfd829ba507d78cf4283bb7f64455b17b32320c80c61ff Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.896955 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mx5qr\" (UniqueName: \"kubernetes.io/projected/a6b33a22-ad43-4a85-bcb7-8345455b171c-kube-api-access-mx5qr\") pod \"watcher-decision-engine-0\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.897030 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a6b33a22-ad43-4a85-bcb7-8345455b171c-logs\") pod \"watcher-decision-engine-0\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.897179 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6b33a22-ad43-4a85-bcb7-8345455b171c-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.897748 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a6b33a22-ad43-4a85-bcb7-8345455b171c-logs\") pod \"watcher-decision-engine-0\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.899019 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a6b33a22-ad43-4a85-bcb7-8345455b171c-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.899079 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6b33a22-ad43-4a85-bcb7-8345455b171c-config-data\") pod \"watcher-decision-engine-0\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.923456 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a6b33a22-ad43-4a85-bcb7-8345455b171c-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 
07:44:53.923778 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6b33a22-ad43-4a85-bcb7-8345455b171c-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.923817 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6b33a22-ad43-4a85-bcb7-8345455b171c-config-data\") pod \"watcher-decision-engine-0\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.928795 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mx5qr\" (UniqueName: \"kubernetes.io/projected/a6b33a22-ad43-4a85-bcb7-8345455b171c-kube-api-access-mx5qr\") pod \"watcher-decision-engine-0\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.979669 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57774149-a18c-475d-ad44-23687ced2981" path="/var/lib/kubelet/pods/57774149-a18c-475d-ad44-23687ced2981/volumes" Nov 22 07:44:53 crc kubenswrapper[4929]: I1122 07:44:53.982327 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89f5e117-d76e-446c-a83a-09737f044c1f" path="/var/lib/kubelet/pods/89f5e117-d76e-446c-a83a-09737f044c1f/volumes" Nov 22 07:44:54 crc kubenswrapper[4929]: I1122 07:44:54.011435 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 22 07:44:54 crc kubenswrapper[4929]: I1122 07:44:54.531651 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 22 07:44:54 crc kubenswrapper[4929]: W1122 07:44:54.538559 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda6b33a22_ad43_4a85_bcb7_8345455b171c.slice/crio-9c3a2c976bf5d48b9f7164f7bf47eafa444109f9585fbf0ed75629d492e58c88 WatchSource:0}: Error finding container 9c3a2c976bf5d48b9f7164f7bf47eafa444109f9585fbf0ed75629d492e58c88: Status 404 returned error can't find the container with id 9c3a2c976bf5d48b9f7164f7bf47eafa444109f9585fbf0ed75629d492e58c88 Nov 22 07:44:54 crc kubenswrapper[4929]: I1122 07:44:54.590954 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5fd7f9f4fc-7jzwk" event={"ID":"4320c60d-8c47-49d1-9fae-6118bfb228dc","Type":"ContainerStarted","Data":"b2fa4da5ddbcb16a16bfd829ba507d78cf4283bb7f64455b17b32320c80c61ff"} Nov 22 07:44:54 crc kubenswrapper[4929]: I1122 07:44:54.593276 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"a6b33a22-ad43-4a85-bcb7-8345455b171c","Type":"ContainerStarted","Data":"9c3a2c976bf5d48b9f7164f7bf47eafa444109f9585fbf0ed75629d492e58c88"} Nov 22 07:44:54 crc kubenswrapper[4929]: I1122 07:44:54.595945 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"5143f7c3-b064-4290-96a9-fb164c215793","Type":"ContainerStarted","Data":"53bd0ed79bd45fd10134e63909b025d9b0fee25c3b5a66aa35cfd933c990b626"} Nov 22 07:44:54 crc kubenswrapper[4929]: I1122 07:44:54.596038 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" 
event={"ID":"5143f7c3-b064-4290-96a9-fb164c215793","Type":"ContainerStarted","Data":"e07990f364b2cefedb72bc0e6d72d1f2b664f0fbf771843b6fd6b6cdc9ec3f58"} Nov 22 07:44:54 crc kubenswrapper[4929]: I1122 07:44:54.618954 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"524a1cf1-3627-4e2e-ae71-7648ba4462fa","Type":"ContainerStarted","Data":"53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6"} Nov 22 07:44:54 crc kubenswrapper[4929]: I1122 07:44:54.655708 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-applier-0" podStartSLOduration=4.116379867 podStartE2EDuration="2m39.655680824s" podCreationTimestamp="2025-11-22 07:42:15 +0000 UTC" firstStartedPulling="2025-11-22 07:42:17.363773568 +0000 UTC m=+1874.473227581" lastFinishedPulling="2025-11-22 07:44:52.903074525 +0000 UTC m=+2030.012528538" observedRunningTime="2025-11-22 07:44:54.64024467 +0000 UTC m=+2031.749698743" watchObservedRunningTime="2025-11-22 07:44:54.655680824 +0000 UTC m=+2031.765134867" Nov 22 07:44:54 crc kubenswrapper[4929]: I1122 07:44:54.660669 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-rsm44" podStartSLOduration=37.296792925 podStartE2EDuration="2m38.660645904s" podCreationTimestamp="2025-11-22 07:42:16 +0000 UTC" firstStartedPulling="2025-11-22 07:42:18.030151301 +0000 UTC m=+1875.139605314" lastFinishedPulling="2025-11-22 07:44:19.39400427 +0000 UTC m=+1996.503458293" observedRunningTime="2025-11-22 07:44:54.657103118 +0000 UTC m=+2031.766557131" watchObservedRunningTime="2025-11-22 07:44:54.660645904 +0000 UTC m=+2031.770099927" Nov 22 07:44:55 crc kubenswrapper[4929]: I1122 07:44:55.639085 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5fd7f9f4fc-7jzwk" event={"ID":"4320c60d-8c47-49d1-9fae-6118bfb228dc","Type":"ContainerStarted","Data":"a65c7e691408e27f0daed6b1a9b9cc45b5bd1c2cbe185b60c9a32864eb997f9b"} Nov 22 07:44:55 crc kubenswrapper[4929]: I1122 07:44:55.640237 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:44:55 crc kubenswrapper[4929]: I1122 07:44:55.647224 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"a6b33a22-ad43-4a85-bcb7-8345455b171c","Type":"ContainerStarted","Data":"8becd2cfdef3a29b1447b0bafded6a8d87437880ebb31f22a87d12f27f0bea6f"} Nov 22 07:44:55 crc kubenswrapper[4929]: I1122 07:44:55.650756 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"5143f7c3-b064-4290-96a9-fb164c215793","Type":"ContainerStarted","Data":"f30fd96119cf56ad9d6a0001718efca6ea38d0a5d6b771d3e2697a994ba08020"} Nov 22 07:44:55 crc kubenswrapper[4929]: I1122 07:44:55.650968 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 22 07:44:55 crc kubenswrapper[4929]: I1122 07:44:55.660947 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-5fd7f9f4fc-7jzwk" podStartSLOduration=54.660927879 podStartE2EDuration="54.660927879s" podCreationTimestamp="2025-11-22 07:44:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:44:55.659425613 +0000 UTC m=+2032.768879646" watchObservedRunningTime="2025-11-22 07:44:55.660927879 +0000 UTC m=+2032.770381902" Nov 22 07:44:55 crc 
kubenswrapper[4929]: I1122 07:44:55.741540 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=60.741520583 podStartE2EDuration="1m0.741520583s" podCreationTimestamp="2025-11-22 07:43:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:44:55.728868776 +0000 UTC m=+2032.838322789" watchObservedRunningTime="2025-11-22 07:44:55.741520583 +0000 UTC m=+2032.850974596" Nov 22 07:44:55 crc kubenswrapper[4929]: I1122 07:44:55.768072 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=2.768050296 podStartE2EDuration="2.768050296s" podCreationTimestamp="2025-11-22 07:44:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:44:55.744403453 +0000 UTC m=+2032.853857486" watchObservedRunningTime="2025-11-22 07:44:55.768050296 +0000 UTC m=+2032.877504309" Nov 22 07:44:56 crc kubenswrapper[4929]: I1122 07:44:56.195884 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Nov 22 07:44:56 crc kubenswrapper[4929]: I1122 07:44:56.196020 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Nov 22 07:44:56 crc kubenswrapper[4929]: I1122 07:44:56.291310 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0" Nov 22 07:44:56 crc kubenswrapper[4929]: I1122 07:44:56.349482 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Nov 22 07:44:56 crc kubenswrapper[4929]: I1122 07:44:56.349612 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Nov 22 07:44:56 crc kubenswrapper[4929]: I1122 07:44:56.705356 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0" Nov 22 07:44:56 crc kubenswrapper[4929]: I1122 07:44:56.753876 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-applier-0"] Nov 22 07:44:57 crc kubenswrapper[4929]: I1122 07:44:57.354410 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/watcher-api-0" podUID="5143f7c3-b064-4290-96a9-fb164c215793" containerName="watcher-api-log" probeResult="failure" output="Get \"https://10.217.0.166:9322/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 07:44:57 crc kubenswrapper[4929]: I1122 07:44:57.668688 4929 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 07:44:57 crc kubenswrapper[4929]: I1122 07:44:57.857822 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Nov 22 07:44:58 crc kubenswrapper[4929]: I1122 07:44:58.677939 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-applier-0" podUID="524a1cf1-3627-4e2e-ae71-7648ba4462fa" containerName="watcher-applier" containerID="cri-o://53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" gracePeriod=30 Nov 22 07:45:00 crc kubenswrapper[4929]: I1122 07:45:00.153359 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr"] Nov 22 07:45:00 crc kubenswrapper[4929]: I1122 07:45:00.154904 
4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr" Nov 22 07:45:00 crc kubenswrapper[4929]: I1122 07:45:00.157366 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 07:45:00 crc kubenswrapper[4929]: I1122 07:45:00.157843 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 07:45:00 crc kubenswrapper[4929]: I1122 07:45:00.165775 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr"] Nov 22 07:45:00 crc kubenswrapper[4929]: I1122 07:45:00.223626 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/db106667-a593-4ad0-9170-c6c91ae46a7a-secret-volume\") pod \"collect-profiles-29396625-l5tmr\" (UID: \"db106667-a593-4ad0-9170-c6c91ae46a7a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr" Nov 22 07:45:00 crc kubenswrapper[4929]: I1122 07:45:00.223899 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nktgx\" (UniqueName: \"kubernetes.io/projected/db106667-a593-4ad0-9170-c6c91ae46a7a-kube-api-access-nktgx\") pod \"collect-profiles-29396625-l5tmr\" (UID: \"db106667-a593-4ad0-9170-c6c91ae46a7a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr" Nov 22 07:45:00 crc kubenswrapper[4929]: I1122 07:45:00.223970 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/db106667-a593-4ad0-9170-c6c91ae46a7a-config-volume\") pod \"collect-profiles-29396625-l5tmr\" (UID: \"db106667-a593-4ad0-9170-c6c91ae46a7a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr" Nov 22 07:45:00 crc kubenswrapper[4929]: I1122 07:45:00.325784 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/db106667-a593-4ad0-9170-c6c91ae46a7a-secret-volume\") pod \"collect-profiles-29396625-l5tmr\" (UID: \"db106667-a593-4ad0-9170-c6c91ae46a7a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr" Nov 22 07:45:00 crc kubenswrapper[4929]: I1122 07:45:00.326075 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nktgx\" (UniqueName: \"kubernetes.io/projected/db106667-a593-4ad0-9170-c6c91ae46a7a-kube-api-access-nktgx\") pod \"collect-profiles-29396625-l5tmr\" (UID: \"db106667-a593-4ad0-9170-c6c91ae46a7a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr" Nov 22 07:45:00 crc kubenswrapper[4929]: I1122 07:45:00.326099 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/db106667-a593-4ad0-9170-c6c91ae46a7a-config-volume\") pod \"collect-profiles-29396625-l5tmr\" (UID: \"db106667-a593-4ad0-9170-c6c91ae46a7a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr" Nov 22 07:45:00 crc kubenswrapper[4929]: I1122 07:45:00.326896 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/db106667-a593-4ad0-9170-c6c91ae46a7a-config-volume\") pod \"collect-profiles-29396625-l5tmr\" (UID: \"db106667-a593-4ad0-9170-c6c91ae46a7a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr" Nov 22 07:45:00 crc kubenswrapper[4929]: I1122 07:45:00.339385 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/db106667-a593-4ad0-9170-c6c91ae46a7a-secret-volume\") pod \"collect-profiles-29396625-l5tmr\" (UID: \"db106667-a593-4ad0-9170-c6c91ae46a7a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr" Nov 22 07:45:00 crc kubenswrapper[4929]: I1122 07:45:00.343884 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nktgx\" (UniqueName: \"kubernetes.io/projected/db106667-a593-4ad0-9170-c6c91ae46a7a-kube-api-access-nktgx\") pod \"collect-profiles-29396625-l5tmr\" (UID: \"db106667-a593-4ad0-9170-c6c91ae46a7a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr" Nov 22 07:45:00 crc kubenswrapper[4929]: I1122 07:45:00.529781 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr" Nov 22 07:45:01 crc kubenswrapper[4929]: W1122 07:45:01.005523 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddb106667_a593_4ad0_9170_c6c91ae46a7a.slice/crio-352a4385eb488583df8c4634f4374f5daa47a534733812a0e7f861b1a0261f6c WatchSource:0}: Error finding container 352a4385eb488583df8c4634f4374f5daa47a534733812a0e7f861b1a0261f6c: Status 404 returned error can't find the container with id 352a4385eb488583df8c4634f4374f5daa47a534733812a0e7f861b1a0261f6c Nov 22 07:45:01 crc kubenswrapper[4929]: I1122 07:45:01.005661 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr"] Nov 22 07:45:01 crc kubenswrapper[4929]: E1122 07:45:01.197867 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:01 crc kubenswrapper[4929]: E1122 07:45:01.200741 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:01 crc kubenswrapper[4929]: E1122 07:45:01.201944 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:01 crc kubenswrapper[4929]: E1122 07:45:01.201978 4929 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/watcher-applier-0" podUID="524a1cf1-3627-4e2e-ae71-7648ba4462fa" 
containerName="watcher-applier" Nov 22 07:45:01 crc kubenswrapper[4929]: I1122 07:45:01.712584 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr" event={"ID":"db106667-a593-4ad0-9170-c6c91ae46a7a","Type":"ContainerStarted","Data":"352a4385eb488583df8c4634f4374f5daa47a534733812a0e7f861b1a0261f6c"} Nov 22 07:45:02 crc kubenswrapper[4929]: I1122 07:45:02.727372 4929 generic.go:334] "Generic (PLEG): container finished" podID="db106667-a593-4ad0-9170-c6c91ae46a7a" containerID="fc9cc4960c7c09d4ced9ebd27ba8db8053879773f588a53ca39bf574902e9d8c" exitCode=0 Nov 22 07:45:02 crc kubenswrapper[4929]: I1122 07:45:02.727484 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr" event={"ID":"db106667-a593-4ad0-9170-c6c91ae46a7a","Type":"ContainerDied","Data":"fc9cc4960c7c09d4ced9ebd27ba8db8053879773f588a53ca39bf574902e9d8c"} Nov 22 07:45:04 crc kubenswrapper[4929]: I1122 07:45:04.012885 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 22 07:45:04 crc kubenswrapper[4929]: I1122 07:45:04.040285 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Nov 22 07:45:04 crc kubenswrapper[4929]: I1122 07:45:04.745621 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Nov 22 07:45:04 crc kubenswrapper[4929]: I1122 07:45:04.775238 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Nov 22 07:45:04 crc kubenswrapper[4929]: E1122 07:45:04.951168 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-r6lzk" podUID="4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c" Nov 22 07:45:05 crc kubenswrapper[4929]: I1122 07:45:05.348289 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr" Nov 22 07:45:05 crc kubenswrapper[4929]: I1122 07:45:05.429088 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/db106667-a593-4ad0-9170-c6c91ae46a7a-secret-volume\") pod \"db106667-a593-4ad0-9170-c6c91ae46a7a\" (UID: \"db106667-a593-4ad0-9170-c6c91ae46a7a\") " Nov 22 07:45:05 crc kubenswrapper[4929]: I1122 07:45:05.429287 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/db106667-a593-4ad0-9170-c6c91ae46a7a-config-volume\") pod \"db106667-a593-4ad0-9170-c6c91ae46a7a\" (UID: \"db106667-a593-4ad0-9170-c6c91ae46a7a\") " Nov 22 07:45:05 crc kubenswrapper[4929]: I1122 07:45:05.429429 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nktgx\" (UniqueName: \"kubernetes.io/projected/db106667-a593-4ad0-9170-c6c91ae46a7a-kube-api-access-nktgx\") pod \"db106667-a593-4ad0-9170-c6c91ae46a7a\" (UID: \"db106667-a593-4ad0-9170-c6c91ae46a7a\") " Nov 22 07:45:05 crc kubenswrapper[4929]: I1122 07:45:05.429965 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db106667-a593-4ad0-9170-c6c91ae46a7a-config-volume" (OuterVolumeSpecName: "config-volume") pod "db106667-a593-4ad0-9170-c6c91ae46a7a" (UID: "db106667-a593-4ad0-9170-c6c91ae46a7a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:45:05 crc kubenswrapper[4929]: I1122 07:45:05.435574 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db106667-a593-4ad0-9170-c6c91ae46a7a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "db106667-a593-4ad0-9170-c6c91ae46a7a" (UID: "db106667-a593-4ad0-9170-c6c91ae46a7a"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:45:05 crc kubenswrapper[4929]: I1122 07:45:05.435581 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db106667-a593-4ad0-9170-c6c91ae46a7a-kube-api-access-nktgx" (OuterVolumeSpecName: "kube-api-access-nktgx") pod "db106667-a593-4ad0-9170-c6c91ae46a7a" (UID: "db106667-a593-4ad0-9170-c6c91ae46a7a"). InnerVolumeSpecName "kube-api-access-nktgx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:45:05 crc kubenswrapper[4929]: I1122 07:45:05.531018 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nktgx\" (UniqueName: \"kubernetes.io/projected/db106667-a593-4ad0-9170-c6c91ae46a7a-kube-api-access-nktgx\") on node \"crc\" DevicePath \"\"" Nov 22 07:45:05 crc kubenswrapper[4929]: I1122 07:45:05.531317 4929 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/db106667-a593-4ad0-9170-c6c91ae46a7a-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 07:45:05 crc kubenswrapper[4929]: I1122 07:45:05.531327 4929 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/db106667-a593-4ad0-9170-c6c91ae46a7a-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 07:45:05 crc kubenswrapper[4929]: I1122 07:45:05.755959 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr" event={"ID":"db106667-a593-4ad0-9170-c6c91ae46a7a","Type":"ContainerDied","Data":"352a4385eb488583df8c4634f4374f5daa47a534733812a0e7f861b1a0261f6c"} Nov 22 07:45:05 crc kubenswrapper[4929]: I1122 07:45:05.755983 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr" Nov 22 07:45:05 crc kubenswrapper[4929]: I1122 07:45:05.756002 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="352a4385eb488583df8c4634f4374f5daa47a534733812a0e7f861b1a0261f6c" Nov 22 07:45:06 crc kubenswrapper[4929]: E1122 07:45:06.197636 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:06 crc kubenswrapper[4929]: E1122 07:45:06.199052 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:06 crc kubenswrapper[4929]: E1122 07:45:06.200740 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:06 crc kubenswrapper[4929]: E1122 07:45:06.200895 4929 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/watcher-applier-0" podUID="524a1cf1-3627-4e2e-ae71-7648ba4462fa" containerName="watcher-applier" Nov 22 07:45:06 crc kubenswrapper[4929]: I1122 07:45:06.363169 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Nov 22 07:45:06 crc kubenswrapper[4929]: I1122 07:45:06.374274 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Nov 22 07:45:06 crc 
kubenswrapper[4929]: I1122 07:45:06.423704 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm"] Nov 22 07:45:06 crc kubenswrapper[4929]: I1122 07:45:06.437504 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396580-z9fcm"] Nov 22 07:45:07 crc kubenswrapper[4929]: E1122 07:45:07.948885 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-9cvcv" podUID="7e5572bd-b4bf-4476-9247-06d7c892dcf1" Nov 22 07:45:07 crc kubenswrapper[4929]: I1122 07:45:07.961610 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a95c39bb-f2ae-4a5c-897f-1ac3a476c436" path="/var/lib/kubelet/pods/a95c39bb-f2ae-4a5c-897f-1ac3a476c436/volumes" Nov 22 07:45:08 crc kubenswrapper[4929]: I1122 07:45:08.308848 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-5fd7f9f4fc-7jzwk" Nov 22 07:45:09 crc kubenswrapper[4929]: E1122 07:45:09.956192 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"sg-core\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"]" pod="openstack/ceilometer-0" podUID="59621e9f-5111-473e-b99f-02b09934b5ad" Nov 22 07:45:10 crc kubenswrapper[4929]: I1122 07:45:10.801822 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59621e9f-5111-473e-b99f-02b09934b5ad","Type":"ContainerStarted","Data":"9d14f3895611e41e2940c1ca0b034dbf15e61f71d901277f27031183315c706a"} Nov 22 07:45:10 crc kubenswrapper[4929]: I1122 07:45:10.801990 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="59621e9f-5111-473e-b99f-02b09934b5ad" containerName="ceilometer-notification-agent" containerID="cri-o://bf9683a4cb4ff10a632572acd7c666e0d095c304d84db4d4a0b2a39ba0e4b7ac" gracePeriod=30 Nov 22 07:45:10 crc kubenswrapper[4929]: I1122 07:45:10.802062 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 07:45:10 crc kubenswrapper[4929]: I1122 07:45:10.802107 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="59621e9f-5111-473e-b99f-02b09934b5ad" containerName="proxy-httpd" containerID="cri-o://9d14f3895611e41e2940c1ca0b034dbf15e61f71d901277f27031183315c706a" gracePeriod=30 Nov 22 07:45:11 crc kubenswrapper[4929]: E1122 07:45:11.197983 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:11 crc kubenswrapper[4929]: E1122 07:45:11.202849 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:11 crc kubenswrapper[4929]: E1122 07:45:11.205312 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:11 crc kubenswrapper[4929]: E1122 07:45:11.205415 4929 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/watcher-applier-0" podUID="524a1cf1-3627-4e2e-ae71-7648ba4462fa" containerName="watcher-applier" Nov 22 07:45:11 crc kubenswrapper[4929]: I1122 07:45:11.818088 4929 generic.go:334] "Generic (PLEG): container finished" podID="59621e9f-5111-473e-b99f-02b09934b5ad" containerID="9d14f3895611e41e2940c1ca0b034dbf15e61f71d901277f27031183315c706a" exitCode=0 Nov 22 07:45:11 crc kubenswrapper[4929]: I1122 07:45:11.818412 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59621e9f-5111-473e-b99f-02b09934b5ad","Type":"ContainerDied","Data":"9d14f3895611e41e2940c1ca0b034dbf15e61f71d901277f27031183315c706a"} Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.123920 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-5b65879b9f-vjcj8"] Nov 22 07:45:13 crc kubenswrapper[4929]: E1122 07:45:13.129609 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db106667-a593-4ad0-9170-c6c91ae46a7a" containerName="collect-profiles" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.129652 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="db106667-a593-4ad0-9170-c6c91ae46a7a" containerName="collect-profiles" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.129961 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="db106667-a593-4ad0-9170-c6c91ae46a7a" containerName="collect-profiles" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.131198 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.135827 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.136149 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.141851 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-5b65879b9f-vjcj8"] Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.144047 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.296124 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-internal-tls-certs\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.296642 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-run-httpd\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.296673 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fn2zk\" (UniqueName: \"kubernetes.io/projected/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-kube-api-access-fn2zk\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.296694 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-log-httpd\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.296739 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-config-data\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.296758 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-public-tls-certs\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.296780 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-etc-swift\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " 
pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.296800 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-combined-ca-bundle\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.372750 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.374552 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.377803 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.378060 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-tqh9r" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.379880 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.398066 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-config-data\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.398133 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-public-tls-certs\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.398156 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-etc-swift\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.398177 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-combined-ca-bundle\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.398432 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-internal-tls-certs\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.398480 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-run-httpd\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: 
\"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.398504 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fn2zk\" (UniqueName: \"kubernetes.io/projected/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-kube-api-access-fn2zk\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.398520 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-log-httpd\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.398976 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-log-httpd\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.399126 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-run-httpd\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.403253 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.406099 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-public-tls-certs\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.406814 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-etc-swift\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.408591 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-config-data\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.420237 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-combined-ca-bundle\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.420849 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fn2zk\" (UniqueName: \"kubernetes.io/projected/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-kube-api-access-fn2zk\") pod 
\"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.426286 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3971fe14-e65d-4a66-a0ce-d004f1f4e0f1-internal-tls-certs\") pod \"swift-proxy-5b65879b9f-vjcj8\" (UID: \"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1\") " pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.466274 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.501173 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72671250-e761-4371-aac4-4789b677d1d7-combined-ca-bundle\") pod \"openstackclient\" (UID: \"72671250-e761-4371-aac4-4789b677d1d7\") " pod="openstack/openstackclient" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.501314 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/72671250-e761-4371-aac4-4789b677d1d7-openstack-config-secret\") pod \"openstackclient\" (UID: \"72671250-e761-4371-aac4-4789b677d1d7\") " pod="openstack/openstackclient" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.501434 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/72671250-e761-4371-aac4-4789b677d1d7-openstack-config\") pod \"openstackclient\" (UID: \"72671250-e761-4371-aac4-4789b677d1d7\") " pod="openstack/openstackclient" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.501817 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-757f8\" (UniqueName: \"kubernetes.io/projected/72671250-e761-4371-aac4-4789b677d1d7-kube-api-access-757f8\") pod \"openstackclient\" (UID: \"72671250-e761-4371-aac4-4789b677d1d7\") " pod="openstack/openstackclient" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.603469 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-757f8\" (UniqueName: \"kubernetes.io/projected/72671250-e761-4371-aac4-4789b677d1d7-kube-api-access-757f8\") pod \"openstackclient\" (UID: \"72671250-e761-4371-aac4-4789b677d1d7\") " pod="openstack/openstackclient" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.604067 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72671250-e761-4371-aac4-4789b677d1d7-combined-ca-bundle\") pod \"openstackclient\" (UID: \"72671250-e761-4371-aac4-4789b677d1d7\") " pod="openstack/openstackclient" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.604092 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/72671250-e761-4371-aac4-4789b677d1d7-openstack-config-secret\") pod \"openstackclient\" (UID: \"72671250-e761-4371-aac4-4789b677d1d7\") " pod="openstack/openstackclient" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.604301 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" 
(UniqueName: \"kubernetes.io/configmap/72671250-e761-4371-aac4-4789b677d1d7-openstack-config\") pod \"openstackclient\" (UID: \"72671250-e761-4371-aac4-4789b677d1d7\") " pod="openstack/openstackclient" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.605179 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/72671250-e761-4371-aac4-4789b677d1d7-openstack-config\") pod \"openstackclient\" (UID: \"72671250-e761-4371-aac4-4789b677d1d7\") " pod="openstack/openstackclient" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.608479 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/72671250-e761-4371-aac4-4789b677d1d7-openstack-config-secret\") pod \"openstackclient\" (UID: \"72671250-e761-4371-aac4-4789b677d1d7\") " pod="openstack/openstackclient" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.610003 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72671250-e761-4371-aac4-4789b677d1d7-combined-ca-bundle\") pod \"openstackclient\" (UID: \"72671250-e761-4371-aac4-4789b677d1d7\") " pod="openstack/openstackclient" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.624382 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-757f8\" (UniqueName: \"kubernetes.io/projected/72671250-e761-4371-aac4-4789b677d1d7-kube-api-access-757f8\") pod \"openstackclient\" (UID: \"72671250-e761-4371-aac4-4789b677d1d7\") " pod="openstack/openstackclient" Nov 22 07:45:13 crc kubenswrapper[4929]: I1122 07:45:13.695079 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 22 07:45:14 crc kubenswrapper[4929]: I1122 07:45:14.169791 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 22 07:45:14 crc kubenswrapper[4929]: W1122 07:45:14.173716 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod72671250_e761_4371_aac4_4789b677d1d7.slice/crio-0ddbfcd08b55b3e67f226c1e67a5bb7578920cd4fa9286e830ba73008334065f WatchSource:0}: Error finding container 0ddbfcd08b55b3e67f226c1e67a5bb7578920cd4fa9286e830ba73008334065f: Status 404 returned error can't find the container with id 0ddbfcd08b55b3e67f226c1e67a5bb7578920cd4fa9286e830ba73008334065f Nov 22 07:45:14 crc kubenswrapper[4929]: I1122 07:45:14.717296 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-5b65879b9f-vjcj8"] Nov 22 07:45:14 crc kubenswrapper[4929]: I1122 07:45:14.846706 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"72671250-e761-4371-aac4-4789b677d1d7","Type":"ContainerStarted","Data":"0ddbfcd08b55b3e67f226c1e67a5bb7578920cd4fa9286e830ba73008334065f"} Nov 22 07:45:14 crc kubenswrapper[4929]: I1122 07:45:14.848121 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5b65879b9f-vjcj8" event={"ID":"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1","Type":"ContainerStarted","Data":"ef4758742ed6c45d2a73fddff5bfe28240cc9022c6b77079fa3de513d30cf1eb"} Nov 22 07:45:16 crc kubenswrapper[4929]: E1122 07:45:16.198624 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , 
exit code -1" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:16 crc kubenswrapper[4929]: E1122 07:45:16.201962 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:16 crc kubenswrapper[4929]: E1122 07:45:16.204203 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:16 crc kubenswrapper[4929]: E1122 07:45:16.204282 4929 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/watcher-applier-0" podUID="524a1cf1-3627-4e2e-ae71-7648ba4462fa" containerName="watcher-applier" Nov 22 07:45:17 crc kubenswrapper[4929]: I1122 07:45:17.879072 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5b65879b9f-vjcj8" event={"ID":"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1","Type":"ContainerStarted","Data":"6b795b6a2a7b30bb8d5e5397adcf19b3a4b38ff864be3cb56138c5ec5edb3fbf"} Nov 22 07:45:17 crc kubenswrapper[4929]: I1122 07:45:17.882611 4929 generic.go:334] "Generic (PLEG): container finished" podID="59621e9f-5111-473e-b99f-02b09934b5ad" containerID="bf9683a4cb4ff10a632572acd7c666e0d095c304d84db4d4a0b2a39ba0e4b7ac" exitCode=0 Nov 22 07:45:17 crc kubenswrapper[4929]: I1122 07:45:17.882639 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59621e9f-5111-473e-b99f-02b09934b5ad","Type":"ContainerDied","Data":"bf9683a4cb4ff10a632572acd7c666e0d095c304d84db4d4a0b2a39ba0e4b7ac"} Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.071452 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.180884 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4wj44\" (UniqueName: \"kubernetes.io/projected/59621e9f-5111-473e-b99f-02b09934b5ad-kube-api-access-4wj44\") pod \"59621e9f-5111-473e-b99f-02b09934b5ad\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.181005 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-config-data\") pod \"59621e9f-5111-473e-b99f-02b09934b5ad\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.181079 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59621e9f-5111-473e-b99f-02b09934b5ad-log-httpd\") pod \"59621e9f-5111-473e-b99f-02b09934b5ad\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.181126 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-scripts\") pod \"59621e9f-5111-473e-b99f-02b09934b5ad\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.181172 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-combined-ca-bundle\") pod \"59621e9f-5111-473e-b99f-02b09934b5ad\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.181327 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-sg-core-conf-yaml\") pod \"59621e9f-5111-473e-b99f-02b09934b5ad\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.181393 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59621e9f-5111-473e-b99f-02b09934b5ad-run-httpd\") pod \"59621e9f-5111-473e-b99f-02b09934b5ad\" (UID: \"59621e9f-5111-473e-b99f-02b09934b5ad\") " Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.183821 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59621e9f-5111-473e-b99f-02b09934b5ad-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "59621e9f-5111-473e-b99f-02b09934b5ad" (UID: "59621e9f-5111-473e-b99f-02b09934b5ad"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.184123 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59621e9f-5111-473e-b99f-02b09934b5ad-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "59621e9f-5111-473e-b99f-02b09934b5ad" (UID: "59621e9f-5111-473e-b99f-02b09934b5ad"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.190418 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59621e9f-5111-473e-b99f-02b09934b5ad-kube-api-access-4wj44" (OuterVolumeSpecName: "kube-api-access-4wj44") pod "59621e9f-5111-473e-b99f-02b09934b5ad" (UID: "59621e9f-5111-473e-b99f-02b09934b5ad"). InnerVolumeSpecName "kube-api-access-4wj44". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.192163 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "59621e9f-5111-473e-b99f-02b09934b5ad" (UID: "59621e9f-5111-473e-b99f-02b09934b5ad"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.193036 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-scripts" (OuterVolumeSpecName: "scripts") pod "59621e9f-5111-473e-b99f-02b09934b5ad" (UID: "59621e9f-5111-473e-b99f-02b09934b5ad"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.254949 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "59621e9f-5111-473e-b99f-02b09934b5ad" (UID: "59621e9f-5111-473e-b99f-02b09934b5ad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.260535 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-config-data" (OuterVolumeSpecName: "config-data") pod "59621e9f-5111-473e-b99f-02b09934b5ad" (UID: "59621e9f-5111-473e-b99f-02b09934b5ad"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.283323 4929 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59621e9f-5111-473e-b99f-02b09934b5ad-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.283366 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.283378 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.283396 4929 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.283407 4929 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/59621e9f-5111-473e-b99f-02b09934b5ad-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.283419 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4wj44\" (UniqueName: \"kubernetes.io/projected/59621e9f-5111-473e-b99f-02b09934b5ad-kube-api-access-4wj44\") on node \"crc\" DevicePath \"\"" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.283429 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59621e9f-5111-473e-b99f-02b09934b5ad-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.899159 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-5b65879b9f-vjcj8" event={"ID":"3971fe14-e65d-4a66-a0ce-d004f1f4e0f1","Type":"ContainerStarted","Data":"39fe94ad31491f5a7ac01c67d6eac94157e1b969b122badff7c70d6aef406a61"} Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.899371 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.902969 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"59621e9f-5111-473e-b99f-02b09934b5ad","Type":"ContainerDied","Data":"36f331230e4d83355996655a111c2a52736a75ab217475b4fbae05e9c8c70e3a"} Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.903021 4929 scope.go:117] "RemoveContainer" containerID="9d14f3895611e41e2940c1ca0b034dbf15e61f71d901277f27031183315c706a" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.903169 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.927826 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-5b65879b9f-vjcj8" podStartSLOduration=5.92779448 podStartE2EDuration="5.92779448s" podCreationTimestamp="2025-11-22 07:45:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:45:18.921677702 +0000 UTC m=+2056.031131725" watchObservedRunningTime="2025-11-22 07:45:18.92779448 +0000 UTC m=+2056.037248493" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.941847 4929 scope.go:117] "RemoveContainer" containerID="bf9683a4cb4ff10a632572acd7c666e0d095c304d84db4d4a0b2a39ba0e4b7ac" Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.963368 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:45:18 crc kubenswrapper[4929]: I1122 07:45:18.995455 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.014227 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:45:19 crc kubenswrapper[4929]: E1122 07:45:19.017123 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59621e9f-5111-473e-b99f-02b09934b5ad" containerName="ceilometer-notification-agent" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.017292 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="59621e9f-5111-473e-b99f-02b09934b5ad" containerName="ceilometer-notification-agent" Nov 22 07:45:19 crc kubenswrapper[4929]: E1122 07:45:19.017391 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59621e9f-5111-473e-b99f-02b09934b5ad" containerName="proxy-httpd" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.017448 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="59621e9f-5111-473e-b99f-02b09934b5ad" containerName="proxy-httpd" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.017938 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="59621e9f-5111-473e-b99f-02b09934b5ad" containerName="proxy-httpd" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.018029 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="59621e9f-5111-473e-b99f-02b09934b5ad" containerName="ceilometer-notification-agent" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.022085 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.024605 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.025029 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.031491 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.101798 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-config-data\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.102407 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.102433 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcpt6\" (UniqueName: \"kubernetes.io/projected/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-kube-api-access-rcpt6\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.102464 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-scripts\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.102496 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-run-httpd\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.102743 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-log-httpd\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.102836 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.205095 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-log-httpd\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.205156 4929 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.205236 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-config-data\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.205271 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.205295 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcpt6\" (UniqueName: \"kubernetes.io/projected/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-kube-api-access-rcpt6\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.205329 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-scripts\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.205363 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-run-httpd\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.205873 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-run-httpd\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.206110 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-log-httpd\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.211045 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.211058 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-scripts\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.222423 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-config-data\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.224627 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcpt6\" (UniqueName: \"kubernetes.io/projected/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-kube-api-access-rcpt6\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.225596 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.358134 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.796695 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:45:19 crc kubenswrapper[4929]: W1122 07:45:19.802846 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podea9f837e_fbb6_493b_ba22_3e51e744e3c8.slice/crio-4839bbff2d89f7446de65346abe0b15cab2291d1e44f30af146697f333d31335 WatchSource:0}: Error finding container 4839bbff2d89f7446de65346abe0b15cab2291d1e44f30af146697f333d31335: Status 404 returned error can't find the container with id 4839bbff2d89f7446de65346abe0b15cab2291d1e44f30af146697f333d31335 Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.915907 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ea9f837e-fbb6-493b-ba22-3e51e744e3c8","Type":"ContainerStarted","Data":"4839bbff2d89f7446de65346abe0b15cab2291d1e44f30af146697f333d31335"} Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.917429 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:19 crc kubenswrapper[4929]: I1122 07:45:19.958728 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59621e9f-5111-473e-b99f-02b09934b5ad" path="/var/lib/kubelet/pods/59621e9f-5111-473e-b99f-02b09934b5ad/volumes" Nov 22 07:45:21 crc kubenswrapper[4929]: E1122 07:45:21.198078 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:21 crc kubenswrapper[4929]: E1122 07:45:21.201177 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:21 crc kubenswrapper[4929]: E1122 07:45:21.205793 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code 
-1" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:21 crc kubenswrapper[4929]: E1122 07:45:21.206363 4929 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/watcher-applier-0" podUID="524a1cf1-3627-4e2e-ae71-7648ba4462fa" containerName="watcher-applier" Nov 22 07:45:21 crc kubenswrapper[4929]: E1122 07:45:21.949324 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-9cvcv" podUID="7e5572bd-b4bf-4476-9247-06d7c892dcf1" Nov 22 07:45:23 crc kubenswrapper[4929]: I1122 07:45:23.474202 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:23 crc kubenswrapper[4929]: I1122 07:45:23.477059 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-5b65879b9f-vjcj8" Nov 22 07:45:26 crc kubenswrapper[4929]: E1122 07:45:26.199788 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:26 crc kubenswrapper[4929]: E1122 07:45:26.202816 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:26 crc kubenswrapper[4929]: E1122 07:45:26.205598 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:26 crc kubenswrapper[4929]: E1122 07:45:26.205673 4929 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/watcher-applier-0" podUID="524a1cf1-3627-4e2e-ae71-7648ba4462fa" containerName="watcher-applier" Nov 22 07:45:30 crc kubenswrapper[4929]: I1122 07:45:30.022202 4929 generic.go:334] "Generic (PLEG): container finished" podID="524a1cf1-3627-4e2e-ae71-7648ba4462fa" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" exitCode=137 Nov 22 07:45:30 crc kubenswrapper[4929]: I1122 07:45:30.022385 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"524a1cf1-3627-4e2e-ae71-7648ba4462fa","Type":"ContainerDied","Data":"53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6"} Nov 22 07:45:31 crc kubenswrapper[4929]: E1122 07:45:31.323367 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is 
not created or running: checking if PID of 53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6 is running failed: container process not found" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:31 crc kubenswrapper[4929]: E1122 07:45:31.323721 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6 is running failed: container process not found" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:31 crc kubenswrapper[4929]: E1122 07:45:31.324406 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6 is running failed: container process not found" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:31 crc kubenswrapper[4929]: E1122 07:45:31.324434 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6 is running failed: container process not found" probeType="Readiness" pod="openstack/watcher-applier-0" podUID="524a1cf1-3627-4e2e-ae71-7648ba4462fa" containerName="watcher-applier" Nov 22 07:45:33 crc kubenswrapper[4929]: I1122 07:45:33.845104 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:45:36 crc kubenswrapper[4929]: E1122 07:45:36.195773 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6 is running failed: container process not found" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:36 crc kubenswrapper[4929]: E1122 07:45:36.196330 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6 is running failed: container process not found" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:36 crc kubenswrapper[4929]: E1122 07:45:36.196689 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6 is running failed: container process not found" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:36 crc kubenswrapper[4929]: E1122 07:45:36.196723 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6 is running failed: container process not found" probeType="Readiness" pod="openstack/watcher-applier-0" 
podUID="524a1cf1-3627-4e2e-ae71-7648ba4462fa" containerName="watcher-applier" Nov 22 07:45:41 crc kubenswrapper[4929]: E1122 07:45:41.196819 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6 is running failed: container process not found" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:41 crc kubenswrapper[4929]: E1122 07:45:41.198373 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6 is running failed: container process not found" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:41 crc kubenswrapper[4929]: E1122 07:45:41.198687 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6 is running failed: container process not found" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:41 crc kubenswrapper[4929]: E1122 07:45:41.198805 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6 is running failed: container process not found" probeType="Readiness" pod="openstack/watcher-applier-0" podUID="524a1cf1-3627-4e2e-ae71-7648ba4462fa" containerName="watcher-applier" Nov 22 07:45:46 crc kubenswrapper[4929]: E1122 07:45:46.196048 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6 is running failed: container process not found" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:46 crc kubenswrapper[4929]: E1122 07:45:46.197042 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6 is running failed: container process not found" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:46 crc kubenswrapper[4929]: E1122 07:45:46.197689 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6 is running failed: container process not found" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" cmd=["/usr/bin/pgrep","-r","DRST","watcher-applier"] Nov 22 07:45:46 crc kubenswrapper[4929]: E1122 07:45:46.197730 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6 is 
running failed: container process not found" probeType="Readiness" pod="openstack/watcher-applier-0" podUID="524a1cf1-3627-4e2e-ae71-7648ba4462fa" containerName="watcher-applier" Nov 22 07:45:46 crc kubenswrapper[4929]: E1122 07:45:46.615722 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified" Nov 22 07:45:46 crc kubenswrapper[4929]: E1122 07:45:46.616188 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:openstackclient,Image:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,Command:[/bin/sleep],Args:[infinity],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h699h59ch598h5dbh6ch65bh57h5b8h55fh96h687h594h57ch58bh68dh569h646h85h5b5hbch7ch6fh8fh55fh586h588h5dbhbdh64dh595h5c9q,ValueFrom:nil,},EnvVar{Name:OS_CLOUD,Value:default,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_CA_CERT,Value:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_HOST,Value:metric-storage-prometheus.openstack.svc,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_PORT,Value:9090,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:openstack-config,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/cloudrc,SubPath:cloudrc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-757f8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42401,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42401,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstackclient_openstack(72671250-e761-4371-aac4-4789b677d1d7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:45:46 crc kubenswrapper[4929]: E1122 07:45:46.617367 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstackclient" podUID="72671250-e761-4371-aac4-4789b677d1d7" Nov 22 07:45:46 crc kubenswrapper[4929]: I1122 07:45:46.990746 4929 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.136581 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6zvj\" (UniqueName: \"kubernetes.io/projected/524a1cf1-3627-4e2e-ae71-7648ba4462fa-kube-api-access-j6zvj\") pod \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\" (UID: \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\") " Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.137016 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/524a1cf1-3627-4e2e-ae71-7648ba4462fa-config-data\") pod \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\" (UID: \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\") " Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.137084 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/524a1cf1-3627-4e2e-ae71-7648ba4462fa-combined-ca-bundle\") pod \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\" (UID: \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\") " Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.137164 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/524a1cf1-3627-4e2e-ae71-7648ba4462fa-logs\") pod \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\" (UID: \"524a1cf1-3627-4e2e-ae71-7648ba4462fa\") " Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.137607 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/524a1cf1-3627-4e2e-ae71-7648ba4462fa-logs" (OuterVolumeSpecName: "logs") pod "524a1cf1-3627-4e2e-ae71-7648ba4462fa" (UID: "524a1cf1-3627-4e2e-ae71-7648ba4462fa"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.137965 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/524a1cf1-3627-4e2e-ae71-7648ba4462fa-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.143408 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/524a1cf1-3627-4e2e-ae71-7648ba4462fa-kube-api-access-j6zvj" (OuterVolumeSpecName: "kube-api-access-j6zvj") pod "524a1cf1-3627-4e2e-ae71-7648ba4462fa" (UID: "524a1cf1-3627-4e2e-ae71-7648ba4462fa"). InnerVolumeSpecName "kube-api-access-j6zvj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.161880 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/524a1cf1-3627-4e2e-ae71-7648ba4462fa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "524a1cf1-3627-4e2e-ae71-7648ba4462fa" (UID: "524a1cf1-3627-4e2e-ae71-7648ba4462fa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.198285 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/524a1cf1-3627-4e2e-ae71-7648ba4462fa-config-data" (OuterVolumeSpecName: "config-data") pod "524a1cf1-3627-4e2e-ae71-7648ba4462fa" (UID: "524a1cf1-3627-4e2e-ae71-7648ba4462fa"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.199553 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"524a1cf1-3627-4e2e-ae71-7648ba4462fa","Type":"ContainerDied","Data":"f3393fe110150a3c12059e5d2f96d34e8ab430540f60837a55d208a53b8b2b91"} Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.199594 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.199610 4929 scope.go:117] "RemoveContainer" containerID="53e5ca4bee233df49f707d7517b960c2ecbf52f48d1f8ed1ad60a40bb3ec53a6" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.201700 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ea9f837e-fbb6-493b-ba22-3e51e744e3c8","Type":"ContainerStarted","Data":"5435817da713c371d075af4f7f523dae3d17a03b03fef1b71fd27ba1aa4a2e24"} Nov 22 07:45:47 crc kubenswrapper[4929]: E1122 07:45:47.205626 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified\\\"\"" pod="openstack/openstackclient" podUID="72671250-e761-4371-aac4-4789b677d1d7" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.239248 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6zvj\" (UniqueName: \"kubernetes.io/projected/524a1cf1-3627-4e2e-ae71-7648ba4462fa-kube-api-access-j6zvj\") on node \"crc\" DevicePath \"\"" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.239279 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/524a1cf1-3627-4e2e-ae71-7648ba4462fa-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.239289 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/524a1cf1-3627-4e2e-ae71-7648ba4462fa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.264067 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-applier-0"] Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.281840 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-applier-0"] Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.295264 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-applier-0"] Nov 22 07:45:47 crc kubenswrapper[4929]: E1122 07:45:47.295688 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="524a1cf1-3627-4e2e-ae71-7648ba4462fa" containerName="watcher-applier" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.295706 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="524a1cf1-3627-4e2e-ae71-7648ba4462fa" containerName="watcher-applier" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.295887 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="524a1cf1-3627-4e2e-ae71-7648ba4462fa" containerName="watcher-applier" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.296492 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-applier-0" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.298796 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-applier-config-data" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.308666 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.445259 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcg7w\" (UniqueName: \"kubernetes.io/projected/b26c616d-54c4-4a6c-aef6-4f17f7442138-kube-api-access-jcg7w\") pod \"watcher-applier-0\" (UID: \"b26c616d-54c4-4a6c-aef6-4f17f7442138\") " pod="openstack/watcher-applier-0" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.445316 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b26c616d-54c4-4a6c-aef6-4f17f7442138-config-data\") pod \"watcher-applier-0\" (UID: \"b26c616d-54c4-4a6c-aef6-4f17f7442138\") " pod="openstack/watcher-applier-0" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.445369 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b26c616d-54c4-4a6c-aef6-4f17f7442138-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"b26c616d-54c4-4a6c-aef6-4f17f7442138\") " pod="openstack/watcher-applier-0" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.445386 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b26c616d-54c4-4a6c-aef6-4f17f7442138-logs\") pod \"watcher-applier-0\" (UID: \"b26c616d-54c4-4a6c-aef6-4f17f7442138\") " pod="openstack/watcher-applier-0" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.547919 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcg7w\" (UniqueName: \"kubernetes.io/projected/b26c616d-54c4-4a6c-aef6-4f17f7442138-kube-api-access-jcg7w\") pod \"watcher-applier-0\" (UID: \"b26c616d-54c4-4a6c-aef6-4f17f7442138\") " pod="openstack/watcher-applier-0" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.547985 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b26c616d-54c4-4a6c-aef6-4f17f7442138-config-data\") pod \"watcher-applier-0\" (UID: \"b26c616d-54c4-4a6c-aef6-4f17f7442138\") " pod="openstack/watcher-applier-0" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.548062 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b26c616d-54c4-4a6c-aef6-4f17f7442138-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"b26c616d-54c4-4a6c-aef6-4f17f7442138\") " pod="openstack/watcher-applier-0" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.548088 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b26c616d-54c4-4a6c-aef6-4f17f7442138-logs\") pod \"watcher-applier-0\" (UID: \"b26c616d-54c4-4a6c-aef6-4f17f7442138\") " pod="openstack/watcher-applier-0" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.548827 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/b26c616d-54c4-4a6c-aef6-4f17f7442138-logs\") pod \"watcher-applier-0\" (UID: \"b26c616d-54c4-4a6c-aef6-4f17f7442138\") " pod="openstack/watcher-applier-0" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.552998 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b26c616d-54c4-4a6c-aef6-4f17f7442138-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"b26c616d-54c4-4a6c-aef6-4f17f7442138\") " pod="openstack/watcher-applier-0" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.553024 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b26c616d-54c4-4a6c-aef6-4f17f7442138-config-data\") pod \"watcher-applier-0\" (UID: \"b26c616d-54c4-4a6c-aef6-4f17f7442138\") " pod="openstack/watcher-applier-0" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.568346 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcg7w\" (UniqueName: \"kubernetes.io/projected/b26c616d-54c4-4a6c-aef6-4f17f7442138-kube-api-access-jcg7w\") pod \"watcher-applier-0\" (UID: \"b26c616d-54c4-4a6c-aef6-4f17f7442138\") " pod="openstack/watcher-applier-0" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.618594 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Nov 22 07:45:47 crc kubenswrapper[4929]: I1122 07:45:47.967454 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="524a1cf1-3627-4e2e-ae71-7648ba4462fa" path="/var/lib/kubelet/pods/524a1cf1-3627-4e2e-ae71-7648ba4462fa/volumes" Nov 22 07:45:48 crc kubenswrapper[4929]: I1122 07:45:48.213192 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-r6lzk" event={"ID":"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c","Type":"ContainerStarted","Data":"5cc0a46aa7a491ff2d9f413a47650e35c1e27c38bbf7da737dba0db4e69791b5"} Nov 22 07:45:48 crc kubenswrapper[4929]: I1122 07:45:48.216651 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-9cvcv" event={"ID":"7e5572bd-b4bf-4476-9247-06d7c892dcf1","Type":"ContainerStarted","Data":"fdbc2fa7daba9dc24b636806a887c51ccd87efa8c07ad9d0eef10f7bb914a961"} Nov 22 07:45:48 crc kubenswrapper[4929]: I1122 07:45:48.221127 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ea9f837e-fbb6-493b-ba22-3e51e744e3c8","Type":"ContainerStarted","Data":"3b2c4394bcc14ba1ebb37813a9ead7ad0759df601a40047fd37e6d0eb65dcd15"} Nov 22 07:45:48 crc kubenswrapper[4929]: I1122 07:45:48.241885 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-r6lzk" podStartSLOduration=3.4508068290000002 podStartE2EDuration="3m32.241856379s" podCreationTimestamp="2025-11-22 07:42:16 +0000 UTC" firstStartedPulling="2025-11-22 07:42:17.900913502 +0000 UTC m=+1875.010367515" lastFinishedPulling="2025-11-22 07:45:46.691963052 +0000 UTC m=+2083.801417065" observedRunningTime="2025-11-22 07:45:48.235615438 +0000 UTC m=+2085.345069451" watchObservedRunningTime="2025-11-22 07:45:48.241856379 +0000 UTC m=+2085.351310412" Nov 22 07:45:48 crc kubenswrapper[4929]: I1122 07:45:48.269633 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-9cvcv" podStartSLOduration=2.930942195 podStartE2EDuration="3m32.269611082s" podCreationTimestamp="2025-11-22 07:42:16 +0000 UTC" 
firstStartedPulling="2025-11-22 07:42:17.671783895 +0000 UTC m=+1874.781237908" lastFinishedPulling="2025-11-22 07:45:47.010452782 +0000 UTC m=+2084.119906795" observedRunningTime="2025-11-22 07:45:48.256680548 +0000 UTC m=+2085.366134571" watchObservedRunningTime="2025-11-22 07:45:48.269611082 +0000 UTC m=+2085.379065115" Nov 22 07:45:48 crc kubenswrapper[4929]: I1122 07:45:48.282524 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Nov 22 07:45:48 crc kubenswrapper[4929]: W1122 07:45:48.285223 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb26c616d_54c4_4a6c_aef6_4f17f7442138.slice/crio-228b248919702f30e950564c9241f27842876a5c74766e59b11e80263da8dfe6 WatchSource:0}: Error finding container 228b248919702f30e950564c9241f27842876a5c74766e59b11e80263da8dfe6: Status 404 returned error can't find the container with id 228b248919702f30e950564c9241f27842876a5c74766e59b11e80263da8dfe6 Nov 22 07:45:49 crc kubenswrapper[4929]: I1122 07:45:49.269662 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"b26c616d-54c4-4a6c-aef6-4f17f7442138","Type":"ContainerStarted","Data":"8fd182301c829ed05c2c20a60bbdd33c3b588e3f841eb2c9a8c67593840bc2e7"} Nov 22 07:45:49 crc kubenswrapper[4929]: I1122 07:45:49.275297 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"b26c616d-54c4-4a6c-aef6-4f17f7442138","Type":"ContainerStarted","Data":"228b248919702f30e950564c9241f27842876a5c74766e59b11e80263da8dfe6"} Nov 22 07:45:49 crc kubenswrapper[4929]: I1122 07:45:49.294881 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-applier-0" podStartSLOduration=2.294857812 podStartE2EDuration="2.294857812s" podCreationTimestamp="2025-11-22 07:45:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:45:49.288092258 +0000 UTC m=+2086.397546281" watchObservedRunningTime="2025-11-22 07:45:49.294857812 +0000 UTC m=+2086.404311825" Nov 22 07:45:52 crc kubenswrapper[4929]: I1122 07:45:52.619348 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-applier-0" Nov 22 07:45:53 crc kubenswrapper[4929]: I1122 07:45:53.049982 4929 scope.go:117] "RemoveContainer" containerID="6c200077ee5cc194de584f0657e735edb69a37eb71d9ea8c3c3c5e4407197f2a" Nov 22 07:45:53 crc kubenswrapper[4929]: I1122 07:45:53.861762 4929 scope.go:117] "RemoveContainer" containerID="3e7d9920c9b3debca85ab0512edf6f13b85ecb1ea466aba46ecd872ca2118d71" Nov 22 07:45:53 crc kubenswrapper[4929]: I1122 07:45:53.883890 4929 scope.go:117] "RemoveContainer" containerID="1070fcba26aa714e79a73ce3e21951a05405b60a3a175241606025bd3a9317f8" Nov 22 07:45:54 crc kubenswrapper[4929]: I1122 07:45:54.011058 4929 scope.go:117] "RemoveContainer" containerID="b3c0e34df4fdf9ce1db275bb8125355c4e79ea53d664d3e990696e39c4cb4aa0" Nov 22 07:45:55 crc kubenswrapper[4929]: I1122 07:45:55.338462 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ea9f837e-fbb6-493b-ba22-3e51e744e3c8","Type":"ContainerStarted","Data":"7d3016db7cc3e94032fc62bc054a659b7295c9ae767b942ca06423fad1855fdd"} Nov 22 07:45:57 crc kubenswrapper[4929]: I1122 07:45:57.619450 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Nov 22 
07:45:57 crc kubenswrapper[4929]: I1122 07:45:57.652311 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0" Nov 22 07:45:58 crc kubenswrapper[4929]: I1122 07:45:58.395233 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0" Nov 22 07:46:08 crc kubenswrapper[4929]: I1122 07:46:08.483227 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"72671250-e761-4371-aac4-4789b677d1d7","Type":"ContainerStarted","Data":"de1ade33c4dede03c2065e2a59e9061a993d5b9d3b8a9789576f5d1f4b4d0df3"} Nov 22 07:46:08 crc kubenswrapper[4929]: I1122 07:46:08.485826 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ea9f837e-fbb6-493b-ba22-3e51e744e3c8","Type":"ContainerStarted","Data":"a6faaafb789786b3ef0e0f572ae9a1fcf360811437e294774bea87a508145014"} Nov 22 07:46:08 crc kubenswrapper[4929]: I1122 07:46:08.486008 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 07:46:08 crc kubenswrapper[4929]: I1122 07:46:08.486044 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerName="proxy-httpd" containerID="cri-o://a6faaafb789786b3ef0e0f572ae9a1fcf360811437e294774bea87a508145014" gracePeriod=30 Nov 22 07:46:08 crc kubenswrapper[4929]: I1122 07:46:08.486080 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerName="sg-core" containerID="cri-o://7d3016db7cc3e94032fc62bc054a659b7295c9ae767b942ca06423fad1855fdd" gracePeriod=30 Nov 22 07:46:08 crc kubenswrapper[4929]: I1122 07:46:08.486039 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerName="ceilometer-central-agent" containerID="cri-o://5435817da713c371d075af4f7f523dae3d17a03b03fef1b71fd27ba1aa4a2e24" gracePeriod=30 Nov 22 07:46:08 crc kubenswrapper[4929]: I1122 07:46:08.486155 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerName="ceilometer-notification-agent" containerID="cri-o://3b2c4394bcc14ba1ebb37813a9ead7ad0759df601a40047fd37e6d0eb65dcd15" gracePeriod=30 Nov 22 07:46:08 crc kubenswrapper[4929]: I1122 07:46:08.511473 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.1848059380000002 podStartE2EDuration="55.511441801s" podCreationTimestamp="2025-11-22 07:45:13 +0000 UTC" firstStartedPulling="2025-11-22 07:45:14.17611779 +0000 UTC m=+2051.285571803" lastFinishedPulling="2025-11-22 07:46:07.502753653 +0000 UTC m=+2104.612207666" observedRunningTime="2025-11-22 07:46:08.504253707 +0000 UTC m=+2105.613707720" watchObservedRunningTime="2025-11-22 07:46:08.511441801 +0000 UTC m=+2105.620895814" Nov 22 07:46:08 crc kubenswrapper[4929]: I1122 07:46:08.538059 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.84177716 podStartE2EDuration="50.538040185s" podCreationTimestamp="2025-11-22 07:45:18 +0000 UTC" firstStartedPulling="2025-11-22 07:45:19.80698144 +0000 UTC m=+2056.916435453" lastFinishedPulling="2025-11-22 07:46:07.503244465 +0000 UTC 
m=+2104.612698478" observedRunningTime="2025-11-22 07:46:08.53202783 +0000 UTC m=+2105.641481843" watchObservedRunningTime="2025-11-22 07:46:08.538040185 +0000 UTC m=+2105.647494198" Nov 22 07:46:09 crc kubenswrapper[4929]: I1122 07:46:09.496633 4929 generic.go:334] "Generic (PLEG): container finished" podID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerID="a6faaafb789786b3ef0e0f572ae9a1fcf360811437e294774bea87a508145014" exitCode=0 Nov 22 07:46:09 crc kubenswrapper[4929]: I1122 07:46:09.496886 4929 generic.go:334] "Generic (PLEG): container finished" podID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerID="7d3016db7cc3e94032fc62bc054a659b7295c9ae767b942ca06423fad1855fdd" exitCode=2 Nov 22 07:46:09 crc kubenswrapper[4929]: I1122 07:46:09.496894 4929 generic.go:334] "Generic (PLEG): container finished" podID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerID="3b2c4394bcc14ba1ebb37813a9ead7ad0759df601a40047fd37e6d0eb65dcd15" exitCode=0 Nov 22 07:46:09 crc kubenswrapper[4929]: I1122 07:46:09.496703 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ea9f837e-fbb6-493b-ba22-3e51e744e3c8","Type":"ContainerDied","Data":"a6faaafb789786b3ef0e0f572ae9a1fcf360811437e294774bea87a508145014"} Nov 22 07:46:09 crc kubenswrapper[4929]: I1122 07:46:09.496928 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ea9f837e-fbb6-493b-ba22-3e51e744e3c8","Type":"ContainerDied","Data":"7d3016db7cc3e94032fc62bc054a659b7295c9ae767b942ca06423fad1855fdd"} Nov 22 07:46:09 crc kubenswrapper[4929]: I1122 07:46:09.496943 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ea9f837e-fbb6-493b-ba22-3e51e744e3c8","Type":"ContainerDied","Data":"3b2c4394bcc14ba1ebb37813a9ead7ad0759df601a40047fd37e6d0eb65dcd15"} Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.541355 4929 generic.go:334] "Generic (PLEG): container finished" podID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerID="5435817da713c371d075af4f7f523dae3d17a03b03fef1b71fd27ba1aa4a2e24" exitCode=0 Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.541652 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ea9f837e-fbb6-493b-ba22-3e51e744e3c8","Type":"ContainerDied","Data":"5435817da713c371d075af4f7f523dae3d17a03b03fef1b71fd27ba1aa4a2e24"} Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.667567 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.793299 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-log-httpd\") pod \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.793382 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rcpt6\" (UniqueName: \"kubernetes.io/projected/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-kube-api-access-rcpt6\") pod \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.793434 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-config-data\") pod \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.793461 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-sg-core-conf-yaml\") pod \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.793570 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-run-httpd\") pod \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.793607 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-combined-ca-bundle\") pod \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.793637 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-scripts\") pod \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\" (UID: \"ea9f837e-fbb6-493b-ba22-3e51e744e3c8\") " Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.793970 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ea9f837e-fbb6-493b-ba22-3e51e744e3c8" (UID: "ea9f837e-fbb6-493b-ba22-3e51e744e3c8"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.794309 4929 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.795318 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ea9f837e-fbb6-493b-ba22-3e51e744e3c8" (UID: "ea9f837e-fbb6-493b-ba22-3e51e744e3c8"). 
InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.800192 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-scripts" (OuterVolumeSpecName: "scripts") pod "ea9f837e-fbb6-493b-ba22-3e51e744e3c8" (UID: "ea9f837e-fbb6-493b-ba22-3e51e744e3c8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.800432 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-kube-api-access-rcpt6" (OuterVolumeSpecName: "kube-api-access-rcpt6") pod "ea9f837e-fbb6-493b-ba22-3e51e744e3c8" (UID: "ea9f837e-fbb6-493b-ba22-3e51e744e3c8"). InnerVolumeSpecName "kube-api-access-rcpt6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.825065 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ea9f837e-fbb6-493b-ba22-3e51e744e3c8" (UID: "ea9f837e-fbb6-493b-ba22-3e51e744e3c8"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.877465 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ea9f837e-fbb6-493b-ba22-3e51e744e3c8" (UID: "ea9f837e-fbb6-493b-ba22-3e51e744e3c8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.896415 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rcpt6\" (UniqueName: \"kubernetes.io/projected/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-kube-api-access-rcpt6\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.896448 4929 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.896463 4929 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.896475 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.896490 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.918039 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-config-data" (OuterVolumeSpecName: "config-data") pod "ea9f837e-fbb6-493b-ba22-3e51e744e3c8" (UID: "ea9f837e-fbb6-493b-ba22-3e51e744e3c8"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:46:11 crc kubenswrapper[4929]: I1122 07:46:11.999130 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea9f837e-fbb6-493b-ba22-3e51e744e3c8-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.551374 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ea9f837e-fbb6-493b-ba22-3e51e744e3c8","Type":"ContainerDied","Data":"4839bbff2d89f7446de65346abe0b15cab2291d1e44f30af146697f333d31335"} Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.551438 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.552326 4929 scope.go:117] "RemoveContainer" containerID="a6faaafb789786b3ef0e0f572ae9a1fcf360811437e294774bea87a508145014" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.578429 4929 scope.go:117] "RemoveContainer" containerID="7d3016db7cc3e94032fc62bc054a659b7295c9ae767b942ca06423fad1855fdd" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.579538 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.591270 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.599050 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:46:12 crc kubenswrapper[4929]: E1122 07:46:12.599481 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerName="ceilometer-central-agent" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.599500 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerName="ceilometer-central-agent" Nov 22 07:46:12 crc kubenswrapper[4929]: E1122 07:46:12.599514 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerName="ceilometer-notification-agent" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.599521 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerName="ceilometer-notification-agent" Nov 22 07:46:12 crc kubenswrapper[4929]: E1122 07:46:12.599537 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerName="proxy-httpd" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.599544 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerName="proxy-httpd" Nov 22 07:46:12 crc kubenswrapper[4929]: E1122 07:46:12.599553 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerName="sg-core" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.599558 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerName="sg-core" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.599731 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerName="ceilometer-central-agent" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.599744 4929 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerName="ceilometer-notification-agent" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.599758 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerName="sg-core" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.599772 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" containerName="proxy-httpd" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.601328 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.603320 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.603331 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.604504 4929 scope.go:117] "RemoveContainer" containerID="3b2c4394bcc14ba1ebb37813a9ead7ad0759df601a40047fd37e6d0eb65dcd15" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.618325 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.625481 4929 scope.go:117] "RemoveContainer" containerID="5435817da713c371d075af4f7f523dae3d17a03b03fef1b71fd27ba1aa4a2e24" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.710238 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-config-data\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.710290 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.710399 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.710482 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17608323-2c28-467e-83ea-de232b2d1211-log-httpd\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.710509 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmz6q\" (UniqueName: \"kubernetes.io/projected/17608323-2c28-467e-83ea-de232b2d1211-kube-api-access-jmz6q\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.710587 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-scripts\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.710672 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17608323-2c28-467e-83ea-de232b2d1211-run-httpd\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.812557 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-scripts\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.812602 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17608323-2c28-467e-83ea-de232b2d1211-run-httpd\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.812629 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-config-data\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.812651 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.812702 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.812768 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17608323-2c28-467e-83ea-de232b2d1211-log-httpd\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.812793 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmz6q\" (UniqueName: \"kubernetes.io/projected/17608323-2c28-467e-83ea-de232b2d1211-kube-api-access-jmz6q\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.813180 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17608323-2c28-467e-83ea-de232b2d1211-run-httpd\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.813396 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/17608323-2c28-467e-83ea-de232b2d1211-log-httpd\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.818906 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-config-data\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.818947 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.820333 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.830638 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-scripts\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.831456 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmz6q\" (UniqueName: \"kubernetes.io/projected/17608323-2c28-467e-83ea-de232b2d1211-kube-api-access-jmz6q\") pod \"ceilometer-0\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " pod="openstack/ceilometer-0" Nov 22 07:46:12 crc kubenswrapper[4929]: I1122 07:46:12.920583 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:46:13 crc kubenswrapper[4929]: I1122 07:46:13.416775 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:46:13 crc kubenswrapper[4929]: I1122 07:46:13.561734 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17608323-2c28-467e-83ea-de232b2d1211","Type":"ContainerStarted","Data":"0b298af4f419927c92362f57c2ec74294a441906f952d030a286a727cb573f17"} Nov 22 07:46:13 crc kubenswrapper[4929]: I1122 07:46:13.957433 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea9f837e-fbb6-493b-ba22-3e51e744e3c8" path="/var/lib/kubelet/pods/ea9f837e-fbb6-493b-ba22-3e51e744e3c8/volumes" Nov 22 07:46:17 crc kubenswrapper[4929]: I1122 07:46:17.630315 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17608323-2c28-467e-83ea-de232b2d1211","Type":"ContainerStarted","Data":"429ff505f482c792bf35375da3a8edbe12ebea885cb2f0b54d7cc0fb2a066ff9"} Nov 22 07:46:19 crc kubenswrapper[4929]: I1122 07:46:19.658427 4929 generic.go:334] "Generic (PLEG): container finished" podID="155c1bdd-4b26-4059-8ab7-7a6299bc17c9" containerID="4c8d130ba2bfc229564add643c163fd3b2720216177366f184bd57174725d9b2" exitCode=0 Nov 22 07:46:19 crc kubenswrapper[4929]: I1122 07:46:19.658536 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-rsm44" event={"ID":"155c1bdd-4b26-4059-8ab7-7a6299bc17c9","Type":"ContainerDied","Data":"4c8d130ba2bfc229564add643c163fd3b2720216177366f184bd57174725d9b2"} Nov 22 07:46:20 crc kubenswrapper[4929]: I1122 07:46:20.669615 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17608323-2c28-467e-83ea-de232b2d1211","Type":"ContainerStarted","Data":"48d94726a38244705f0514da864b6928c691ad7e10844f5a0a612c5a6186bec6"} Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.019660 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-rsm44" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.115293 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pnqhm\" (UniqueName: \"kubernetes.io/projected/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-kube-api-access-pnqhm\") pod \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.115352 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-config-data\") pod \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.115383 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-scripts\") pod \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.115414 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-logs\") pod \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.115533 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-combined-ca-bundle\") pod \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\" (UID: \"155c1bdd-4b26-4059-8ab7-7a6299bc17c9\") " Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.115927 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-logs" (OuterVolumeSpecName: "logs") pod "155c1bdd-4b26-4059-8ab7-7a6299bc17c9" (UID: "155c1bdd-4b26-4059-8ab7-7a6299bc17c9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.116695 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.123808 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-kube-api-access-pnqhm" (OuterVolumeSpecName: "kube-api-access-pnqhm") pod "155c1bdd-4b26-4059-8ab7-7a6299bc17c9" (UID: "155c1bdd-4b26-4059-8ab7-7a6299bc17c9"). InnerVolumeSpecName "kube-api-access-pnqhm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.124247 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-scripts" (OuterVolumeSpecName: "scripts") pod "155c1bdd-4b26-4059-8ab7-7a6299bc17c9" (UID: "155c1bdd-4b26-4059-8ab7-7a6299bc17c9"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.143031 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "155c1bdd-4b26-4059-8ab7-7a6299bc17c9" (UID: "155c1bdd-4b26-4059-8ab7-7a6299bc17c9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.147976 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-config-data" (OuterVolumeSpecName: "config-data") pod "155c1bdd-4b26-4059-8ab7-7a6299bc17c9" (UID: "155c1bdd-4b26-4059-8ab7-7a6299bc17c9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.218844 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.218889 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pnqhm\" (UniqueName: \"kubernetes.io/projected/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-kube-api-access-pnqhm\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.218905 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.218918 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/155c1bdd-4b26-4059-8ab7-7a6299bc17c9-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.679420 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-rsm44" event={"ID":"155c1bdd-4b26-4059-8ab7-7a6299bc17c9","Type":"ContainerDied","Data":"b52e461ef02e8d1eabf60a06c855c2412aeda87e02d6e80b3c819f5fc68f351b"} Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.679461 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b52e461ef02e8d1eabf60a06c855c2412aeda87e02d6e80b3c819f5fc68f351b" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.679478 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-rsm44" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.783703 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5bcb6df596-xswbt"] Nov 22 07:46:21 crc kubenswrapper[4929]: E1122 07:46:21.784115 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="155c1bdd-4b26-4059-8ab7-7a6299bc17c9" containerName="placement-db-sync" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.784137 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="155c1bdd-4b26-4059-8ab7-7a6299bc17c9" containerName="placement-db-sync" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.784400 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="155c1bdd-4b26-4059-8ab7-7a6299bc17c9" containerName="placement-db-sync" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.785622 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.787502 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.789678 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-jrjkj" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.799816 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.800069 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.804530 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.817766 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5bcb6df596-xswbt"] Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.829393 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28cf3d66-0622-409a-b63b-212351f3827d-combined-ca-bundle\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.829453 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28cf3d66-0622-409a-b63b-212351f3827d-config-data\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.829555 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28cf3d66-0622-409a-b63b-212351f3827d-scripts\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.829592 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/28cf3d66-0622-409a-b63b-212351f3827d-internal-tls-certs\") pod \"placement-5bcb6df596-xswbt\" (UID: 
\"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.829611 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-852kh\" (UniqueName: \"kubernetes.io/projected/28cf3d66-0622-409a-b63b-212351f3827d-kube-api-access-852kh\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.829874 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/28cf3d66-0622-409a-b63b-212351f3827d-public-tls-certs\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.829967 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28cf3d66-0622-409a-b63b-212351f3827d-logs\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.931120 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28cf3d66-0622-409a-b63b-212351f3827d-scripts\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.931175 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/28cf3d66-0622-409a-b63b-212351f3827d-internal-tls-certs\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.931197 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-852kh\" (UniqueName: \"kubernetes.io/projected/28cf3d66-0622-409a-b63b-212351f3827d-kube-api-access-852kh\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.931270 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/28cf3d66-0622-409a-b63b-212351f3827d-public-tls-certs\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.931294 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28cf3d66-0622-409a-b63b-212351f3827d-logs\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.931337 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28cf3d66-0622-409a-b63b-212351f3827d-combined-ca-bundle\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " 
pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.931363 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28cf3d66-0622-409a-b63b-212351f3827d-config-data\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.934381 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28cf3d66-0622-409a-b63b-212351f3827d-scripts\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.934576 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28cf3d66-0622-409a-b63b-212351f3827d-config-data\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.934716 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/28cf3d66-0622-409a-b63b-212351f3827d-logs\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.944914 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28cf3d66-0622-409a-b63b-212351f3827d-combined-ca-bundle\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.944947 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/28cf3d66-0622-409a-b63b-212351f3827d-public-tls-certs\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.945196 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/28cf3d66-0622-409a-b63b-212351f3827d-internal-tls-certs\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:21 crc kubenswrapper[4929]: I1122 07:46:21.954980 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-852kh\" (UniqueName: \"kubernetes.io/projected/28cf3d66-0622-409a-b63b-212351f3827d-kube-api-access-852kh\") pod \"placement-5bcb6df596-xswbt\" (UID: \"28cf3d66-0622-409a-b63b-212351f3827d\") " pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:22 crc kubenswrapper[4929]: I1122 07:46:22.111928 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:22 crc kubenswrapper[4929]: I1122 07:46:22.638220 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5bcb6df596-xswbt"] Nov 22 07:46:22 crc kubenswrapper[4929]: I1122 07:46:22.688882 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17608323-2c28-467e-83ea-de232b2d1211","Type":"ContainerStarted","Data":"ce765c327170f06830c3fbb8b16cc30294f09b7ab5ce6a9d225f06eee474791d"} Nov 22 07:46:22 crc kubenswrapper[4929]: I1122 07:46:22.689867 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5bcb6df596-xswbt" event={"ID":"28cf3d66-0622-409a-b63b-212351f3827d","Type":"ContainerStarted","Data":"5f91b24d2116a607d2596889e5092f717bc699d492a37c38f394d258bce02cec"} Nov 22 07:46:23 crc kubenswrapper[4929]: I1122 07:46:23.701355 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5bcb6df596-xswbt" event={"ID":"28cf3d66-0622-409a-b63b-212351f3827d","Type":"ContainerStarted","Data":"f955f6c1ce871e962eb508342f32fcccd68e859756ec9f42e6005da34cef4858"} Nov 22 07:46:25 crc kubenswrapper[4929]: I1122 07:46:25.725759 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5bcb6df596-xswbt" event={"ID":"28cf3d66-0622-409a-b63b-212351f3827d","Type":"ContainerStarted","Data":"c61f3ab8cd4b14ce3ca231365e27066529712c14ab0d25332f2ae966d895d7a9"} Nov 22 07:46:25 crc kubenswrapper[4929]: I1122 07:46:25.728436 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:25 crc kubenswrapper[4929]: I1122 07:46:25.728511 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:25 crc kubenswrapper[4929]: I1122 07:46:25.760703 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-5bcb6df596-xswbt" podStartSLOduration=4.7606776889999995 podStartE2EDuration="4.760677689s" podCreationTimestamp="2025-11-22 07:46:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:46:25.748706858 +0000 UTC m=+2122.858160921" watchObservedRunningTime="2025-11-22 07:46:25.760677689 +0000 UTC m=+2122.870131712" Nov 22 07:46:27 crc kubenswrapper[4929]: I1122 07:46:27.747320 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17608323-2c28-467e-83ea-de232b2d1211","Type":"ContainerStarted","Data":"80a847b3ff021d516a0875b80f39321527f4115d9a6c0d4427b14168f95eed2b"} Nov 22 07:46:28 crc kubenswrapper[4929]: I1122 07:46:28.756045 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 07:46:28 crc kubenswrapper[4929]: I1122 07:46:28.799431 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.630564055 podStartE2EDuration="16.799408021s" podCreationTimestamp="2025-11-22 07:46:12 +0000 UTC" firstStartedPulling="2025-11-22 07:46:13.438189346 +0000 UTC m=+2110.547643359" lastFinishedPulling="2025-11-22 07:46:26.607033312 +0000 UTC m=+2123.716487325" observedRunningTime="2025-11-22 07:46:28.783822993 +0000 UTC m=+2125.893277006" watchObservedRunningTime="2025-11-22 07:46:28.799408021 +0000 UTC m=+2125.908862034" Nov 22 07:46:42 crc kubenswrapper[4929]: I1122 07:46:42.863121 4929 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-vpgb4"] Nov 22 07:46:42 crc kubenswrapper[4929]: I1122 07:46:42.865817 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-vpgb4" Nov 22 07:46:42 crc kubenswrapper[4929]: I1122 07:46:42.874495 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-vpgb4"] Nov 22 07:46:42 crc kubenswrapper[4929]: I1122 07:46:42.924549 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 22 07:46:42 crc kubenswrapper[4929]: I1122 07:46:42.969473 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-59xgj"] Nov 22 07:46:42 crc kubenswrapper[4929]: I1122 07:46:42.970981 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-59xgj" Nov 22 07:46:42 crc kubenswrapper[4929]: I1122 07:46:42.976908 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-59xgj"] Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.033321 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dac5799-42d7-4917-ad44-68640f1526ff-operator-scripts\") pod \"nova-api-db-create-vpgb4\" (UID: \"9dac5799-42d7-4917-ad44-68640f1526ff\") " pod="openstack/nova-api-db-create-vpgb4" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.033807 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-494wd\" (UniqueName: \"kubernetes.io/projected/9dac5799-42d7-4917-ad44-68640f1526ff-kube-api-access-494wd\") pod \"nova-api-db-create-vpgb4\" (UID: \"9dac5799-42d7-4917-ad44-68640f1526ff\") " pod="openstack/nova-api-db-create-vpgb4" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.077957 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-f286-account-create-rvfxv"] Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.079184 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f286-account-create-rvfxv" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.080946 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.086410 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-tpt6v"] Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.087699 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-tpt6v" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.096168 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f286-account-create-rvfxv"] Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.105973 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-tpt6v"] Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.141678 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfbn4\" (UniqueName: \"kubernetes.io/projected/701a52ea-cd55-4a02-9662-18d5899a3324-kube-api-access-pfbn4\") pod \"nova-cell0-db-create-59xgj\" (UID: \"701a52ea-cd55-4a02-9662-18d5899a3324\") " pod="openstack/nova-cell0-db-create-59xgj" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.141771 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-494wd\" (UniqueName: \"kubernetes.io/projected/9dac5799-42d7-4917-ad44-68640f1526ff-kube-api-access-494wd\") pod \"nova-api-db-create-vpgb4\" (UID: \"9dac5799-42d7-4917-ad44-68640f1526ff\") " pod="openstack/nova-api-db-create-vpgb4" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.142701 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/701a52ea-cd55-4a02-9662-18d5899a3324-operator-scripts\") pod \"nova-cell0-db-create-59xgj\" (UID: \"701a52ea-cd55-4a02-9662-18d5899a3324\") " pod="openstack/nova-cell0-db-create-59xgj" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.142917 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dac5799-42d7-4917-ad44-68640f1526ff-operator-scripts\") pod \"nova-api-db-create-vpgb4\" (UID: \"9dac5799-42d7-4917-ad44-68640f1526ff\") " pod="openstack/nova-api-db-create-vpgb4" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.143863 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dac5799-42d7-4917-ad44-68640f1526ff-operator-scripts\") pod \"nova-api-db-create-vpgb4\" (UID: \"9dac5799-42d7-4917-ad44-68640f1526ff\") " pod="openstack/nova-api-db-create-vpgb4" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.163668 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-494wd\" (UniqueName: \"kubernetes.io/projected/9dac5799-42d7-4917-ad44-68640f1526ff-kube-api-access-494wd\") pod \"nova-api-db-create-vpgb4\" (UID: \"9dac5799-42d7-4917-ad44-68640f1526ff\") " pod="openstack/nova-api-db-create-vpgb4" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.199284 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-vpgb4" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.246251 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d84fa1ff-4863-4730-be5b-a2ae8516dc71-operator-scripts\") pod \"nova-api-f286-account-create-rvfxv\" (UID: \"d84fa1ff-4863-4730-be5b-a2ae8516dc71\") " pod="openstack/nova-api-f286-account-create-rvfxv" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.246320 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a55beaa-874f-44cf-bdc3-1eb292fb8ae7-operator-scripts\") pod \"nova-cell1-db-create-tpt6v\" (UID: \"3a55beaa-874f-44cf-bdc3-1eb292fb8ae7\") " pod="openstack/nova-cell1-db-create-tpt6v" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.246347 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqblb\" (UniqueName: \"kubernetes.io/projected/3a55beaa-874f-44cf-bdc3-1eb292fb8ae7-kube-api-access-fqblb\") pod \"nova-cell1-db-create-tpt6v\" (UID: \"3a55beaa-874f-44cf-bdc3-1eb292fb8ae7\") " pod="openstack/nova-cell1-db-create-tpt6v" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.246375 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfbn4\" (UniqueName: \"kubernetes.io/projected/701a52ea-cd55-4a02-9662-18d5899a3324-kube-api-access-pfbn4\") pod \"nova-cell0-db-create-59xgj\" (UID: \"701a52ea-cd55-4a02-9662-18d5899a3324\") " pod="openstack/nova-cell0-db-create-59xgj" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.246420 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/701a52ea-cd55-4a02-9662-18d5899a3324-operator-scripts\") pod \"nova-cell0-db-create-59xgj\" (UID: \"701a52ea-cd55-4a02-9662-18d5899a3324\") " pod="openstack/nova-cell0-db-create-59xgj" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.246549 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tztc6\" (UniqueName: \"kubernetes.io/projected/d84fa1ff-4863-4730-be5b-a2ae8516dc71-kube-api-access-tztc6\") pod \"nova-api-f286-account-create-rvfxv\" (UID: \"d84fa1ff-4863-4730-be5b-a2ae8516dc71\") " pod="openstack/nova-api-f286-account-create-rvfxv" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.262413 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/701a52ea-cd55-4a02-9662-18d5899a3324-operator-scripts\") pod \"nova-cell0-db-create-59xgj\" (UID: \"701a52ea-cd55-4a02-9662-18d5899a3324\") " pod="openstack/nova-cell0-db-create-59xgj" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.265787 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-a212-account-create-b44cg"] Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.267312 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-a212-account-create-b44cg" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.269592 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.286993 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-a212-account-create-b44cg"] Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.290495 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfbn4\" (UniqueName: \"kubernetes.io/projected/701a52ea-cd55-4a02-9662-18d5899a3324-kube-api-access-pfbn4\") pod \"nova-cell0-db-create-59xgj\" (UID: \"701a52ea-cd55-4a02-9662-18d5899a3324\") " pod="openstack/nova-cell0-db-create-59xgj" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.298016 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-59xgj" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.348699 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tztc6\" (UniqueName: \"kubernetes.io/projected/d84fa1ff-4863-4730-be5b-a2ae8516dc71-kube-api-access-tztc6\") pod \"nova-api-f286-account-create-rvfxv\" (UID: \"d84fa1ff-4863-4730-be5b-a2ae8516dc71\") " pod="openstack/nova-api-f286-account-create-rvfxv" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.348787 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d84fa1ff-4863-4730-be5b-a2ae8516dc71-operator-scripts\") pod \"nova-api-f286-account-create-rvfxv\" (UID: \"d84fa1ff-4863-4730-be5b-a2ae8516dc71\") " pod="openstack/nova-api-f286-account-create-rvfxv" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.348826 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a55beaa-874f-44cf-bdc3-1eb292fb8ae7-operator-scripts\") pod \"nova-cell1-db-create-tpt6v\" (UID: \"3a55beaa-874f-44cf-bdc3-1eb292fb8ae7\") " pod="openstack/nova-cell1-db-create-tpt6v" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.348853 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqblb\" (UniqueName: \"kubernetes.io/projected/3a55beaa-874f-44cf-bdc3-1eb292fb8ae7-kube-api-access-fqblb\") pod \"nova-cell1-db-create-tpt6v\" (UID: \"3a55beaa-874f-44cf-bdc3-1eb292fb8ae7\") " pod="openstack/nova-cell1-db-create-tpt6v" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.351340 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d84fa1ff-4863-4730-be5b-a2ae8516dc71-operator-scripts\") pod \"nova-api-f286-account-create-rvfxv\" (UID: \"d84fa1ff-4863-4730-be5b-a2ae8516dc71\") " pod="openstack/nova-api-f286-account-create-rvfxv" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.352355 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a55beaa-874f-44cf-bdc3-1eb292fb8ae7-operator-scripts\") pod \"nova-cell1-db-create-tpt6v\" (UID: \"3a55beaa-874f-44cf-bdc3-1eb292fb8ae7\") " pod="openstack/nova-cell1-db-create-tpt6v" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.367933 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tztc6\" (UniqueName: 
\"kubernetes.io/projected/d84fa1ff-4863-4730-be5b-a2ae8516dc71-kube-api-access-tztc6\") pod \"nova-api-f286-account-create-rvfxv\" (UID: \"d84fa1ff-4863-4730-be5b-a2ae8516dc71\") " pod="openstack/nova-api-f286-account-create-rvfxv" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.369499 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqblb\" (UniqueName: \"kubernetes.io/projected/3a55beaa-874f-44cf-bdc3-1eb292fb8ae7-kube-api-access-fqblb\") pod \"nova-cell1-db-create-tpt6v\" (UID: \"3a55beaa-874f-44cf-bdc3-1eb292fb8ae7\") " pod="openstack/nova-cell1-db-create-tpt6v" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.400372 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f286-account-create-rvfxv" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.409085 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-tpt6v" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.452842 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8726cb86-7598-4300-aa4a-4c4dd8deaa3d-operator-scripts\") pod \"nova-cell0-a212-account-create-b44cg\" (UID: \"8726cb86-7598-4300-aa4a-4c4dd8deaa3d\") " pod="openstack/nova-cell0-a212-account-create-b44cg" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.458427 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2j68\" (UniqueName: \"kubernetes.io/projected/8726cb86-7598-4300-aa4a-4c4dd8deaa3d-kube-api-access-m2j68\") pod \"nova-cell0-a212-account-create-b44cg\" (UID: \"8726cb86-7598-4300-aa4a-4c4dd8deaa3d\") " pod="openstack/nova-cell0-a212-account-create-b44cg" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.476200 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-3a16-account-create-hz4wf"] Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.480842 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-3a16-account-create-hz4wf" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.488048 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.511112 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-3a16-account-create-hz4wf"] Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.559898 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8726cb86-7598-4300-aa4a-4c4dd8deaa3d-operator-scripts\") pod \"nova-cell0-a212-account-create-b44cg\" (UID: \"8726cb86-7598-4300-aa4a-4c4dd8deaa3d\") " pod="openstack/nova-cell0-a212-account-create-b44cg" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.560072 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2j68\" (UniqueName: \"kubernetes.io/projected/8726cb86-7598-4300-aa4a-4c4dd8deaa3d-kube-api-access-m2j68\") pod \"nova-cell0-a212-account-create-b44cg\" (UID: \"8726cb86-7598-4300-aa4a-4c4dd8deaa3d\") " pod="openstack/nova-cell0-a212-account-create-b44cg" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.561188 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8726cb86-7598-4300-aa4a-4c4dd8deaa3d-operator-scripts\") pod \"nova-cell0-a212-account-create-b44cg\" (UID: \"8726cb86-7598-4300-aa4a-4c4dd8deaa3d\") " pod="openstack/nova-cell0-a212-account-create-b44cg" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.580494 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2j68\" (UniqueName: \"kubernetes.io/projected/8726cb86-7598-4300-aa4a-4c4dd8deaa3d-kube-api-access-m2j68\") pod \"nova-cell0-a212-account-create-b44cg\" (UID: \"8726cb86-7598-4300-aa4a-4c4dd8deaa3d\") " pod="openstack/nova-cell0-a212-account-create-b44cg" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.661365 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrcgx\" (UniqueName: \"kubernetes.io/projected/a59addba-444c-4d86-bd65-d8796e750a94-kube-api-access-wrcgx\") pod \"nova-cell1-3a16-account-create-hz4wf\" (UID: \"a59addba-444c-4d86-bd65-d8796e750a94\") " pod="openstack/nova-cell1-3a16-account-create-hz4wf" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.661560 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a59addba-444c-4d86-bd65-d8796e750a94-operator-scripts\") pod \"nova-cell1-3a16-account-create-hz4wf\" (UID: \"a59addba-444c-4d86-bd65-d8796e750a94\") " pod="openstack/nova-cell1-3a16-account-create-hz4wf" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.747807 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-a212-account-create-b44cg" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.766899 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrcgx\" (UniqueName: \"kubernetes.io/projected/a59addba-444c-4d86-bd65-d8796e750a94-kube-api-access-wrcgx\") pod \"nova-cell1-3a16-account-create-hz4wf\" (UID: \"a59addba-444c-4d86-bd65-d8796e750a94\") " pod="openstack/nova-cell1-3a16-account-create-hz4wf" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.767154 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a59addba-444c-4d86-bd65-d8796e750a94-operator-scripts\") pod \"nova-cell1-3a16-account-create-hz4wf\" (UID: \"a59addba-444c-4d86-bd65-d8796e750a94\") " pod="openstack/nova-cell1-3a16-account-create-hz4wf" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.768305 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a59addba-444c-4d86-bd65-d8796e750a94-operator-scripts\") pod \"nova-cell1-3a16-account-create-hz4wf\" (UID: \"a59addba-444c-4d86-bd65-d8796e750a94\") " pod="openstack/nova-cell1-3a16-account-create-hz4wf" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.789880 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrcgx\" (UniqueName: \"kubernetes.io/projected/a59addba-444c-4d86-bd65-d8796e750a94-kube-api-access-wrcgx\") pod \"nova-cell1-3a16-account-create-hz4wf\" (UID: \"a59addba-444c-4d86-bd65-d8796e750a94\") " pod="openstack/nova-cell1-3a16-account-create-hz4wf" Nov 22 07:46:43 crc kubenswrapper[4929]: W1122 07:46:43.805468 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9dac5799_42d7_4917_ad44_68640f1526ff.slice/crio-9ae162925838683a8e17894511e1fb6ac084c9f80a4a35470c1c3915acc9b903 WatchSource:0}: Error finding container 9ae162925838683a8e17894511e1fb6ac084c9f80a4a35470c1c3915acc9b903: Status 404 returned error can't find the container with id 9ae162925838683a8e17894511e1fb6ac084c9f80a4a35470c1c3915acc9b903 Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.807974 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-vpgb4"] Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.845650 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-3a16-account-create-hz4wf" Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.896866 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-59xgj"] Nov 22 07:46:43 crc kubenswrapper[4929]: I1122 07:46:43.911634 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-vpgb4" event={"ID":"9dac5799-42d7-4917-ad44-68640f1526ff","Type":"ContainerStarted","Data":"9ae162925838683a8e17894511e1fb6ac084c9f80a4a35470c1c3915acc9b903"} Nov 22 07:46:44 crc kubenswrapper[4929]: I1122 07:46:44.045704 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f286-account-create-rvfxv"] Nov 22 07:46:44 crc kubenswrapper[4929]: I1122 07:46:44.059567 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-tpt6v"] Nov 22 07:46:44 crc kubenswrapper[4929]: I1122 07:46:44.212311 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-a212-account-create-b44cg"] Nov 22 07:46:44 crc kubenswrapper[4929]: W1122 07:46:44.220743 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8726cb86_7598_4300_aa4a_4c4dd8deaa3d.slice/crio-682a34ea06997776b867ca5c4cbf410a5d2f835f3350198edf5458554d2de002 WatchSource:0}: Error finding container 682a34ea06997776b867ca5c4cbf410a5d2f835f3350198edf5458554d2de002: Status 404 returned error can't find the container with id 682a34ea06997776b867ca5c4cbf410a5d2f835f3350198edf5458554d2de002 Nov 22 07:46:44 crc kubenswrapper[4929]: I1122 07:46:44.379288 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-3a16-account-create-hz4wf"] Nov 22 07:46:44 crc kubenswrapper[4929]: I1122 07:46:44.922878 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f286-account-create-rvfxv" event={"ID":"d84fa1ff-4863-4730-be5b-a2ae8516dc71","Type":"ContainerStarted","Data":"365572ecc13ab35539eff2b0acd25a79fbdcd1258856e14c44fcb313f11b7e8f"} Nov 22 07:46:44 crc kubenswrapper[4929]: I1122 07:46:44.923194 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f286-account-create-rvfxv" event={"ID":"d84fa1ff-4863-4730-be5b-a2ae8516dc71","Type":"ContainerStarted","Data":"68b50abe9c7af020b3c0ce18fb3aa22bb1deb717cf48c49c706d02f57ab712d2"} Nov 22 07:46:44 crc kubenswrapper[4929]: I1122 07:46:44.925573 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-vpgb4" event={"ID":"9dac5799-42d7-4917-ad44-68640f1526ff","Type":"ContainerStarted","Data":"6aedd34efb89a1c4786abb64e770987ec7c8ba89b55f49603792b39642d5a435"} Nov 22 07:46:44 crc kubenswrapper[4929]: I1122 07:46:44.927543 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-3a16-account-create-hz4wf" event={"ID":"a59addba-444c-4d86-bd65-d8796e750a94","Type":"ContainerStarted","Data":"5caad4214777e1cd83891d1edd9db843145698e5a94efda6149ce28bca8f1f97"} Nov 22 07:46:44 crc kubenswrapper[4929]: I1122 07:46:44.927569 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-3a16-account-create-hz4wf" event={"ID":"a59addba-444c-4d86-bd65-d8796e750a94","Type":"ContainerStarted","Data":"28feff113f5f02b93f05e83da1a65ba720c7313756af29391d9cf872646f595e"} Nov 22 07:46:44 crc kubenswrapper[4929]: I1122 07:46:44.929531 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-59xgj" 
event={"ID":"701a52ea-cd55-4a02-9662-18d5899a3324","Type":"ContainerStarted","Data":"0ef66ad1b8e294a458a52c4e6b0e9e7131283f1e32e3943ac5e082f98e127322"} Nov 22 07:46:44 crc kubenswrapper[4929]: I1122 07:46:44.929557 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-59xgj" event={"ID":"701a52ea-cd55-4a02-9662-18d5899a3324","Type":"ContainerStarted","Data":"827723592f3abb88011cccae3f2a3fed66323949b355f1ffca849ad291a790d9"} Nov 22 07:46:44 crc kubenswrapper[4929]: I1122 07:46:44.931368 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-tpt6v" event={"ID":"3a55beaa-874f-44cf-bdc3-1eb292fb8ae7","Type":"ContainerStarted","Data":"c90db54ce8a09fb86806a636353f0d15e7a425187d70c10912178f7de56938b5"} Nov 22 07:46:44 crc kubenswrapper[4929]: I1122 07:46:44.931409 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-tpt6v" event={"ID":"3a55beaa-874f-44cf-bdc3-1eb292fb8ae7","Type":"ContainerStarted","Data":"ca75852426c94565890011ad0bffd5f9bdf590a1832cdcc388111dd9f1d589ee"} Nov 22 07:46:44 crc kubenswrapper[4929]: I1122 07:46:44.932973 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a212-account-create-b44cg" event={"ID":"8726cb86-7598-4300-aa4a-4c4dd8deaa3d","Type":"ContainerStarted","Data":"d4c3449574449704a8cecef90edaeef7595bf637008703f6f64666d32277eaf6"} Nov 22 07:46:44 crc kubenswrapper[4929]: I1122 07:46:44.932997 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a212-account-create-b44cg" event={"ID":"8726cb86-7598-4300-aa4a-4c4dd8deaa3d","Type":"ContainerStarted","Data":"682a34ea06997776b867ca5c4cbf410a5d2f835f3350198edf5458554d2de002"} Nov 22 07:46:44 crc kubenswrapper[4929]: I1122 07:46:44.943140 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-vpgb4" podStartSLOduration=2.943122831 podStartE2EDuration="2.943122831s" podCreationTimestamp="2025-11-22 07:46:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:46:44.938031898 +0000 UTC m=+2142.047485951" watchObservedRunningTime="2025-11-22 07:46:44.943122831 +0000 UTC m=+2142.052576844" Nov 22 07:46:44 crc kubenswrapper[4929]: I1122 07:46:44.955512 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-tpt6v" podStartSLOduration=1.9554938210000001 podStartE2EDuration="1.955493821s" podCreationTimestamp="2025-11-22 07:46:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:46:44.953352789 +0000 UTC m=+2142.062806812" watchObservedRunningTime="2025-11-22 07:46:44.955493821 +0000 UTC m=+2142.064947834" Nov 22 07:46:44 crc kubenswrapper[4929]: I1122 07:46:44.993353 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-59xgj" podStartSLOduration=2.993330078 podStartE2EDuration="2.993330078s" podCreationTimestamp="2025-11-22 07:46:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:46:44.97649704 +0000 UTC m=+2142.085951053" watchObservedRunningTime="2025-11-22 07:46:44.993330078 +0000 UTC m=+2142.102784081" Nov 22 07:46:45 crc kubenswrapper[4929]: I1122 07:46:45.969508 4929 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/nova-api-f286-account-create-rvfxv" podStartSLOduration=2.969491448 podStartE2EDuration="2.969491448s" podCreationTimestamp="2025-11-22 07:46:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:46:45.962246843 +0000 UTC m=+2143.071700856" watchObservedRunningTime="2025-11-22 07:46:45.969491448 +0000 UTC m=+2143.078945461" Nov 22 07:46:45 crc kubenswrapper[4929]: I1122 07:46:45.980100 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-a212-account-create-b44cg" podStartSLOduration=2.980085485 podStartE2EDuration="2.980085485s" podCreationTimestamp="2025-11-22 07:46:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:46:45.976240092 +0000 UTC m=+2143.085694115" watchObservedRunningTime="2025-11-22 07:46:45.980085485 +0000 UTC m=+2143.089539498" Nov 22 07:46:45 crc kubenswrapper[4929]: I1122 07:46:45.993680 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-3a16-account-create-hz4wf" podStartSLOduration=2.993659424 podStartE2EDuration="2.993659424s" podCreationTimestamp="2025-11-22 07:46:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:46:45.991741818 +0000 UTC m=+2143.101195841" watchObservedRunningTime="2025-11-22 07:46:45.993659424 +0000 UTC m=+2143.103113437" Nov 22 07:46:46 crc kubenswrapper[4929]: I1122 07:46:46.956604 4929 generic.go:334] "Generic (PLEG): container finished" podID="3a55beaa-874f-44cf-bdc3-1eb292fb8ae7" containerID="c90db54ce8a09fb86806a636353f0d15e7a425187d70c10912178f7de56938b5" exitCode=0 Nov 22 07:46:46 crc kubenswrapper[4929]: I1122 07:46:46.956701 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-tpt6v" event={"ID":"3a55beaa-874f-44cf-bdc3-1eb292fb8ae7","Type":"ContainerDied","Data":"c90db54ce8a09fb86806a636353f0d15e7a425187d70c10912178f7de56938b5"} Nov 22 07:46:46 crc kubenswrapper[4929]: I1122 07:46:46.959044 4929 generic.go:334] "Generic (PLEG): container finished" podID="a59addba-444c-4d86-bd65-d8796e750a94" containerID="5caad4214777e1cd83891d1edd9db843145698e5a94efda6149ce28bca8f1f97" exitCode=0 Nov 22 07:46:46 crc kubenswrapper[4929]: I1122 07:46:46.959109 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-3a16-account-create-hz4wf" event={"ID":"a59addba-444c-4d86-bd65-d8796e750a94","Type":"ContainerDied","Data":"5caad4214777e1cd83891d1edd9db843145698e5a94efda6149ce28bca8f1f97"} Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.324003 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-tpt6v" Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.329738 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-3a16-account-create-hz4wf" Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.367744 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqblb\" (UniqueName: \"kubernetes.io/projected/3a55beaa-874f-44cf-bdc3-1eb292fb8ae7-kube-api-access-fqblb\") pod \"3a55beaa-874f-44cf-bdc3-1eb292fb8ae7\" (UID: \"3a55beaa-874f-44cf-bdc3-1eb292fb8ae7\") " Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.368006 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrcgx\" (UniqueName: \"kubernetes.io/projected/a59addba-444c-4d86-bd65-d8796e750a94-kube-api-access-wrcgx\") pod \"a59addba-444c-4d86-bd65-d8796e750a94\" (UID: \"a59addba-444c-4d86-bd65-d8796e750a94\") " Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.368086 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a55beaa-874f-44cf-bdc3-1eb292fb8ae7-operator-scripts\") pod \"3a55beaa-874f-44cf-bdc3-1eb292fb8ae7\" (UID: \"3a55beaa-874f-44cf-bdc3-1eb292fb8ae7\") " Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.368144 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a59addba-444c-4d86-bd65-d8796e750a94-operator-scripts\") pod \"a59addba-444c-4d86-bd65-d8796e750a94\" (UID: \"a59addba-444c-4d86-bd65-d8796e750a94\") " Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.368808 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a59addba-444c-4d86-bd65-d8796e750a94-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a59addba-444c-4d86-bd65-d8796e750a94" (UID: "a59addba-444c-4d86-bd65-d8796e750a94"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.368824 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a55beaa-874f-44cf-bdc3-1eb292fb8ae7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3a55beaa-874f-44cf-bdc3-1eb292fb8ae7" (UID: "3a55beaa-874f-44cf-bdc3-1eb292fb8ae7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.381010 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a55beaa-874f-44cf-bdc3-1eb292fb8ae7-kube-api-access-fqblb" (OuterVolumeSpecName: "kube-api-access-fqblb") pod "3a55beaa-874f-44cf-bdc3-1eb292fb8ae7" (UID: "3a55beaa-874f-44cf-bdc3-1eb292fb8ae7"). InnerVolumeSpecName "kube-api-access-fqblb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.381112 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a59addba-444c-4d86-bd65-d8796e750a94-kube-api-access-wrcgx" (OuterVolumeSpecName: "kube-api-access-wrcgx") pod "a59addba-444c-4d86-bd65-d8796e750a94" (UID: "a59addba-444c-4d86-bd65-d8796e750a94"). InnerVolumeSpecName "kube-api-access-wrcgx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.471284 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrcgx\" (UniqueName: \"kubernetes.io/projected/a59addba-444c-4d86-bd65-d8796e750a94-kube-api-access-wrcgx\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.471317 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3a55beaa-874f-44cf-bdc3-1eb292fb8ae7-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.471326 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a59addba-444c-4d86-bd65-d8796e750a94-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.471336 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqblb\" (UniqueName: \"kubernetes.io/projected/3a55beaa-874f-44cf-bdc3-1eb292fb8ae7-kube-api-access-fqblb\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.980501 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-tpt6v" event={"ID":"3a55beaa-874f-44cf-bdc3-1eb292fb8ae7","Type":"ContainerDied","Data":"ca75852426c94565890011ad0bffd5f9bdf590a1832cdcc388111dd9f1d589ee"} Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.980590 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca75852426c94565890011ad0bffd5f9bdf590a1832cdcc388111dd9f1d589ee" Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.980632 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-tpt6v" Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.982665 4929 generic.go:334] "Generic (PLEG): container finished" podID="9dac5799-42d7-4917-ad44-68640f1526ff" containerID="6aedd34efb89a1c4786abb64e770987ec7c8ba89b55f49603792b39642d5a435" exitCode=0 Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.982716 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-vpgb4" event={"ID":"9dac5799-42d7-4917-ad44-68640f1526ff","Type":"ContainerDied","Data":"6aedd34efb89a1c4786abb64e770987ec7c8ba89b55f49603792b39642d5a435"} Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.984982 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-3a16-account-create-hz4wf" event={"ID":"a59addba-444c-4d86-bd65-d8796e750a94","Type":"ContainerDied","Data":"28feff113f5f02b93f05e83da1a65ba720c7313756af29391d9cf872646f595e"} Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.985033 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="28feff113f5f02b93f05e83da1a65ba720c7313756af29391d9cf872646f595e" Nov 22 07:46:48 crc kubenswrapper[4929]: I1122 07:46:48.985117 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-3a16-account-create-hz4wf" Nov 22 07:46:49 crc kubenswrapper[4929]: I1122 07:46:49.001509 4929 generic.go:334] "Generic (PLEG): container finished" podID="701a52ea-cd55-4a02-9662-18d5899a3324" containerID="0ef66ad1b8e294a458a52c4e6b0e9e7131283f1e32e3943ac5e082f98e127322" exitCode=0 Nov 22 07:46:49 crc kubenswrapper[4929]: I1122 07:46:49.001569 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-59xgj" event={"ID":"701a52ea-cd55-4a02-9662-18d5899a3324","Type":"ContainerDied","Data":"0ef66ad1b8e294a458a52c4e6b0e9e7131283f1e32e3943ac5e082f98e127322"} Nov 22 07:46:50 crc kubenswrapper[4929]: I1122 07:46:50.014516 4929 generic.go:334] "Generic (PLEG): container finished" podID="8726cb86-7598-4300-aa4a-4c4dd8deaa3d" containerID="d4c3449574449704a8cecef90edaeef7595bf637008703f6f64666d32277eaf6" exitCode=0 Nov 22 07:46:50 crc kubenswrapper[4929]: I1122 07:46:50.014623 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a212-account-create-b44cg" event={"ID":"8726cb86-7598-4300-aa4a-4c4dd8deaa3d","Type":"ContainerDied","Data":"d4c3449574449704a8cecef90edaeef7595bf637008703f6f64666d32277eaf6"} Nov 22 07:46:50 crc kubenswrapper[4929]: I1122 07:46:50.016648 4929 generic.go:334] "Generic (PLEG): container finished" podID="d84fa1ff-4863-4730-be5b-a2ae8516dc71" containerID="365572ecc13ab35539eff2b0acd25a79fbdcd1258856e14c44fcb313f11b7e8f" exitCode=0 Nov 22 07:46:50 crc kubenswrapper[4929]: I1122 07:46:50.016682 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f286-account-create-rvfxv" event={"ID":"d84fa1ff-4863-4730-be5b-a2ae8516dc71","Type":"ContainerDied","Data":"365572ecc13ab35539eff2b0acd25a79fbdcd1258856e14c44fcb313f11b7e8f"} Nov 22 07:46:50 crc kubenswrapper[4929]: I1122 07:46:50.427708 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-59xgj" Nov 22 07:46:50 crc kubenswrapper[4929]: I1122 07:46:50.437054 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-vpgb4" Nov 22 07:46:50 crc kubenswrapper[4929]: I1122 07:46:50.508222 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dac5799-42d7-4917-ad44-68640f1526ff-operator-scripts\") pod \"9dac5799-42d7-4917-ad44-68640f1526ff\" (UID: \"9dac5799-42d7-4917-ad44-68640f1526ff\") " Nov 22 07:46:50 crc kubenswrapper[4929]: I1122 07:46:50.508323 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfbn4\" (UniqueName: \"kubernetes.io/projected/701a52ea-cd55-4a02-9662-18d5899a3324-kube-api-access-pfbn4\") pod \"701a52ea-cd55-4a02-9662-18d5899a3324\" (UID: \"701a52ea-cd55-4a02-9662-18d5899a3324\") " Nov 22 07:46:50 crc kubenswrapper[4929]: I1122 07:46:50.508358 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-494wd\" (UniqueName: \"kubernetes.io/projected/9dac5799-42d7-4917-ad44-68640f1526ff-kube-api-access-494wd\") pod \"9dac5799-42d7-4917-ad44-68640f1526ff\" (UID: \"9dac5799-42d7-4917-ad44-68640f1526ff\") " Nov 22 07:46:50 crc kubenswrapper[4929]: I1122 07:46:50.508387 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/701a52ea-cd55-4a02-9662-18d5899a3324-operator-scripts\") pod \"701a52ea-cd55-4a02-9662-18d5899a3324\" (UID: \"701a52ea-cd55-4a02-9662-18d5899a3324\") " Nov 22 07:46:50 crc kubenswrapper[4929]: I1122 07:46:50.509352 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9dac5799-42d7-4917-ad44-68640f1526ff-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9dac5799-42d7-4917-ad44-68640f1526ff" (UID: "9dac5799-42d7-4917-ad44-68640f1526ff"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:46:50 crc kubenswrapper[4929]: I1122 07:46:50.509354 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/701a52ea-cd55-4a02-9662-18d5899a3324-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "701a52ea-cd55-4a02-9662-18d5899a3324" (UID: "701a52ea-cd55-4a02-9662-18d5899a3324"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:46:50 crc kubenswrapper[4929]: I1122 07:46:50.514367 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9dac5799-42d7-4917-ad44-68640f1526ff-kube-api-access-494wd" (OuterVolumeSpecName: "kube-api-access-494wd") pod "9dac5799-42d7-4917-ad44-68640f1526ff" (UID: "9dac5799-42d7-4917-ad44-68640f1526ff"). InnerVolumeSpecName "kube-api-access-494wd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:46:50 crc kubenswrapper[4929]: I1122 07:46:50.515592 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/701a52ea-cd55-4a02-9662-18d5899a3324-kube-api-access-pfbn4" (OuterVolumeSpecName: "kube-api-access-pfbn4") pod "701a52ea-cd55-4a02-9662-18d5899a3324" (UID: "701a52ea-cd55-4a02-9662-18d5899a3324"). InnerVolumeSpecName "kube-api-access-pfbn4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:46:50 crc kubenswrapper[4929]: I1122 07:46:50.610080 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9dac5799-42d7-4917-ad44-68640f1526ff-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:50 crc kubenswrapper[4929]: I1122 07:46:50.610118 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfbn4\" (UniqueName: \"kubernetes.io/projected/701a52ea-cd55-4a02-9662-18d5899a3324-kube-api-access-pfbn4\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:50 crc kubenswrapper[4929]: I1122 07:46:50.610137 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-494wd\" (UniqueName: \"kubernetes.io/projected/9dac5799-42d7-4917-ad44-68640f1526ff-kube-api-access-494wd\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:50 crc kubenswrapper[4929]: I1122 07:46:50.610150 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/701a52ea-cd55-4a02-9662-18d5899a3324-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.030577 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-59xgj" event={"ID":"701a52ea-cd55-4a02-9662-18d5899a3324","Type":"ContainerDied","Data":"827723592f3abb88011cccae3f2a3fed66323949b355f1ffca849ad291a790d9"} Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.030628 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="827723592f3abb88011cccae3f2a3fed66323949b355f1ffca849ad291a790d9" Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.031938 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-59xgj" Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.034285 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-vpgb4" Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.034309 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-vpgb4" event={"ID":"9dac5799-42d7-4917-ad44-68640f1526ff","Type":"ContainerDied","Data":"9ae162925838683a8e17894511e1fb6ac084c9f80a4a35470c1c3915acc9b903"} Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.034342 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9ae162925838683a8e17894511e1fb6ac084c9f80a4a35470c1c3915acc9b903" Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.514558 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-a212-account-create-b44cg" Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.526505 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8726cb86-7598-4300-aa4a-4c4dd8deaa3d-operator-scripts\") pod \"8726cb86-7598-4300-aa4a-4c4dd8deaa3d\" (UID: \"8726cb86-7598-4300-aa4a-4c4dd8deaa3d\") " Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.526575 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m2j68\" (UniqueName: \"kubernetes.io/projected/8726cb86-7598-4300-aa4a-4c4dd8deaa3d-kube-api-access-m2j68\") pod \"8726cb86-7598-4300-aa4a-4c4dd8deaa3d\" (UID: \"8726cb86-7598-4300-aa4a-4c4dd8deaa3d\") " Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.527007 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8726cb86-7598-4300-aa4a-4c4dd8deaa3d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8726cb86-7598-4300-aa4a-4c4dd8deaa3d" (UID: "8726cb86-7598-4300-aa4a-4c4dd8deaa3d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.528162 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8726cb86-7598-4300-aa4a-4c4dd8deaa3d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.532370 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8726cb86-7598-4300-aa4a-4c4dd8deaa3d-kube-api-access-m2j68" (OuterVolumeSpecName: "kube-api-access-m2j68") pod "8726cb86-7598-4300-aa4a-4c4dd8deaa3d" (UID: "8726cb86-7598-4300-aa4a-4c4dd8deaa3d"). InnerVolumeSpecName "kube-api-access-m2j68". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.627759 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f286-account-create-rvfxv" Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.629823 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2j68\" (UniqueName: \"kubernetes.io/projected/8726cb86-7598-4300-aa4a-4c4dd8deaa3d-kube-api-access-m2j68\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.731554 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d84fa1ff-4863-4730-be5b-a2ae8516dc71-operator-scripts\") pod \"d84fa1ff-4863-4730-be5b-a2ae8516dc71\" (UID: \"d84fa1ff-4863-4730-be5b-a2ae8516dc71\") " Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.731749 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tztc6\" (UniqueName: \"kubernetes.io/projected/d84fa1ff-4863-4730-be5b-a2ae8516dc71-kube-api-access-tztc6\") pod \"d84fa1ff-4863-4730-be5b-a2ae8516dc71\" (UID: \"d84fa1ff-4863-4730-be5b-a2ae8516dc71\") " Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.732592 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d84fa1ff-4863-4730-be5b-a2ae8516dc71-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d84fa1ff-4863-4730-be5b-a2ae8516dc71" (UID: "d84fa1ff-4863-4730-be5b-a2ae8516dc71"). 
InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.737708 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d84fa1ff-4863-4730-be5b-a2ae8516dc71-kube-api-access-tztc6" (OuterVolumeSpecName: "kube-api-access-tztc6") pod "d84fa1ff-4863-4730-be5b-a2ae8516dc71" (UID: "d84fa1ff-4863-4730-be5b-a2ae8516dc71"). InnerVolumeSpecName "kube-api-access-tztc6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.833823 4929 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d84fa1ff-4863-4730-be5b-a2ae8516dc71-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:51 crc kubenswrapper[4929]: I1122 07:46:51.833865 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tztc6\" (UniqueName: \"kubernetes.io/projected/d84fa1ff-4863-4730-be5b-a2ae8516dc71-kube-api-access-tztc6\") on node \"crc\" DevicePath \"\"" Nov 22 07:46:52 crc kubenswrapper[4929]: I1122 07:46:52.065522 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a212-account-create-b44cg" event={"ID":"8726cb86-7598-4300-aa4a-4c4dd8deaa3d","Type":"ContainerDied","Data":"682a34ea06997776b867ca5c4cbf410a5d2f835f3350198edf5458554d2de002"} Nov 22 07:46:52 crc kubenswrapper[4929]: I1122 07:46:52.065562 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-a212-account-create-b44cg" Nov 22 07:46:52 crc kubenswrapper[4929]: I1122 07:46:52.065580 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="682a34ea06997776b867ca5c4cbf410a5d2f835f3350198edf5458554d2de002" Nov 22 07:46:52 crc kubenswrapper[4929]: I1122 07:46:52.067237 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f286-account-create-rvfxv" event={"ID":"d84fa1ff-4863-4730-be5b-a2ae8516dc71","Type":"ContainerDied","Data":"68b50abe9c7af020b3c0ce18fb3aa22bb1deb717cf48c49c706d02f57ab712d2"} Nov 22 07:46:52 crc kubenswrapper[4929]: I1122 07:46:52.067263 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="68b50abe9c7af020b3c0ce18fb3aa22bb1deb717cf48c49c706d02f57ab712d2" Nov 22 07:46:52 crc kubenswrapper[4929]: I1122 07:46:52.067337 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-f286-account-create-rvfxv" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.642858 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-s4kv5"] Nov 22 07:46:53 crc kubenswrapper[4929]: E1122 07:46:53.643597 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8726cb86-7598-4300-aa4a-4c4dd8deaa3d" containerName="mariadb-account-create" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.643612 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="8726cb86-7598-4300-aa4a-4c4dd8deaa3d" containerName="mariadb-account-create" Nov 22 07:46:53 crc kubenswrapper[4929]: E1122 07:46:53.643633 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a59addba-444c-4d86-bd65-d8796e750a94" containerName="mariadb-account-create" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.643642 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="a59addba-444c-4d86-bd65-d8796e750a94" containerName="mariadb-account-create" Nov 22 07:46:53 crc kubenswrapper[4929]: E1122 07:46:53.643661 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="701a52ea-cd55-4a02-9662-18d5899a3324" containerName="mariadb-database-create" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.643670 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="701a52ea-cd55-4a02-9662-18d5899a3324" containerName="mariadb-database-create" Nov 22 07:46:53 crc kubenswrapper[4929]: E1122 07:46:53.643693 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9dac5799-42d7-4917-ad44-68640f1526ff" containerName="mariadb-database-create" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.643701 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="9dac5799-42d7-4917-ad44-68640f1526ff" containerName="mariadb-database-create" Nov 22 07:46:53 crc kubenswrapper[4929]: E1122 07:46:53.643726 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d84fa1ff-4863-4730-be5b-a2ae8516dc71" containerName="mariadb-account-create" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.643734 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="d84fa1ff-4863-4730-be5b-a2ae8516dc71" containerName="mariadb-account-create" Nov 22 07:46:53 crc kubenswrapper[4929]: E1122 07:46:53.643752 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a55beaa-874f-44cf-bdc3-1eb292fb8ae7" containerName="mariadb-database-create" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.643760 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a55beaa-874f-44cf-bdc3-1eb292fb8ae7" containerName="mariadb-database-create" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.643969 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="8726cb86-7598-4300-aa4a-4c4dd8deaa3d" containerName="mariadb-account-create" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.643992 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="9dac5799-42d7-4917-ad44-68640f1526ff" containerName="mariadb-database-create" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.644008 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="a59addba-444c-4d86-bd65-d8796e750a94" containerName="mariadb-account-create" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.644021 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="701a52ea-cd55-4a02-9662-18d5899a3324" 
containerName="mariadb-database-create" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.644035 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="d84fa1ff-4863-4730-be5b-a2ae8516dc71" containerName="mariadb-account-create" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.644062 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a55beaa-874f-44cf-bdc3-1eb292fb8ae7" containerName="mariadb-database-create" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.644834 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-s4kv5" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.648540 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.649076 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-mffzz" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.651955 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.655801 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-s4kv5"] Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.674975 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8mhr\" (UniqueName: \"kubernetes.io/projected/f3c0d097-15a2-494f-a1cc-2bde685efa87-kube-api-access-w8mhr\") pod \"nova-cell0-conductor-db-sync-s4kv5\" (UID: \"f3c0d097-15a2-494f-a1cc-2bde685efa87\") " pod="openstack/nova-cell0-conductor-db-sync-s4kv5" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.675016 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3c0d097-15a2-494f-a1cc-2bde685efa87-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-s4kv5\" (UID: \"f3c0d097-15a2-494f-a1cc-2bde685efa87\") " pod="openstack/nova-cell0-conductor-db-sync-s4kv5" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.675079 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3c0d097-15a2-494f-a1cc-2bde685efa87-scripts\") pod \"nova-cell0-conductor-db-sync-s4kv5\" (UID: \"f3c0d097-15a2-494f-a1cc-2bde685efa87\") " pod="openstack/nova-cell0-conductor-db-sync-s4kv5" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.675150 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3c0d097-15a2-494f-a1cc-2bde685efa87-config-data\") pod \"nova-cell0-conductor-db-sync-s4kv5\" (UID: \"f3c0d097-15a2-494f-a1cc-2bde685efa87\") " pod="openstack/nova-cell0-conductor-db-sync-s4kv5" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.777013 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3c0d097-15a2-494f-a1cc-2bde685efa87-config-data\") pod \"nova-cell0-conductor-db-sync-s4kv5\" (UID: \"f3c0d097-15a2-494f-a1cc-2bde685efa87\") " pod="openstack/nova-cell0-conductor-db-sync-s4kv5" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.777127 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-w8mhr\" (UniqueName: \"kubernetes.io/projected/f3c0d097-15a2-494f-a1cc-2bde685efa87-kube-api-access-w8mhr\") pod \"nova-cell0-conductor-db-sync-s4kv5\" (UID: \"f3c0d097-15a2-494f-a1cc-2bde685efa87\") " pod="openstack/nova-cell0-conductor-db-sync-s4kv5" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.777160 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3c0d097-15a2-494f-a1cc-2bde685efa87-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-s4kv5\" (UID: \"f3c0d097-15a2-494f-a1cc-2bde685efa87\") " pod="openstack/nova-cell0-conductor-db-sync-s4kv5" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.777249 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3c0d097-15a2-494f-a1cc-2bde685efa87-scripts\") pod \"nova-cell0-conductor-db-sync-s4kv5\" (UID: \"f3c0d097-15a2-494f-a1cc-2bde685efa87\") " pod="openstack/nova-cell0-conductor-db-sync-s4kv5" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.783023 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3c0d097-15a2-494f-a1cc-2bde685efa87-config-data\") pod \"nova-cell0-conductor-db-sync-s4kv5\" (UID: \"f3c0d097-15a2-494f-a1cc-2bde685efa87\") " pod="openstack/nova-cell0-conductor-db-sync-s4kv5" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.784028 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3c0d097-15a2-494f-a1cc-2bde685efa87-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-s4kv5\" (UID: \"f3c0d097-15a2-494f-a1cc-2bde685efa87\") " pod="openstack/nova-cell0-conductor-db-sync-s4kv5" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.789637 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3c0d097-15a2-494f-a1cc-2bde685efa87-scripts\") pod \"nova-cell0-conductor-db-sync-s4kv5\" (UID: \"f3c0d097-15a2-494f-a1cc-2bde685efa87\") " pod="openstack/nova-cell0-conductor-db-sync-s4kv5" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.795266 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8mhr\" (UniqueName: \"kubernetes.io/projected/f3c0d097-15a2-494f-a1cc-2bde685efa87-kube-api-access-w8mhr\") pod \"nova-cell0-conductor-db-sync-s4kv5\" (UID: \"f3c0d097-15a2-494f-a1cc-2bde685efa87\") " pod="openstack/nova-cell0-conductor-db-sync-s4kv5" Nov 22 07:46:53 crc kubenswrapper[4929]: I1122 07:46:53.962854 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-s4kv5" Nov 22 07:46:55 crc kubenswrapper[4929]: I1122 07:46:55.110006 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-s4kv5"] Nov 22 07:46:55 crc kubenswrapper[4929]: W1122 07:46:55.118811 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3c0d097_15a2_494f_a1cc_2bde685efa87.slice/crio-c80a247f50a30207063b71399a4b44a6efee811f2b036d38cf66b06a73dcab0a WatchSource:0}: Error finding container c80a247f50a30207063b71399a4b44a6efee811f2b036d38cf66b06a73dcab0a: Status 404 returned error can't find the container with id c80a247f50a30207063b71399a4b44a6efee811f2b036d38cf66b06a73dcab0a Nov 22 07:46:56 crc kubenswrapper[4929]: I1122 07:46:56.127523 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-s4kv5" event={"ID":"f3c0d097-15a2-494f-a1cc-2bde685efa87","Type":"ContainerStarted","Data":"c80a247f50a30207063b71399a4b44a6efee811f2b036d38cf66b06a73dcab0a"} Nov 22 07:46:58 crc kubenswrapper[4929]: I1122 07:46:58.788548 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:46:58 crc kubenswrapper[4929]: I1122 07:46:58.800628 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5bcb6df596-xswbt" Nov 22 07:47:15 crc kubenswrapper[4929]: E1122 07:47:15.632607 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified" Nov 22 07:47:15 crc kubenswrapper[4929]: E1122 07:47:15.633356 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:nova-cell0-conductor-db-sync,Image:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CELL_NAME,Value:cell0,ValueFrom:nil,},EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:false,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/kolla/config_files/config.json,SubPath:nova-conductor-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-w8mhr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42436,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-cell0-conductor-db-sync-s4kv5_openstack(f3c0d097-15a2-494f-a1cc-2bde685efa87): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 07:47:15 crc kubenswrapper[4929]: E1122 07:47:15.634545 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-cell0-conductor-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/nova-cell0-conductor-db-sync-s4kv5" podUID="f3c0d097-15a2-494f-a1cc-2bde685efa87" Nov 22 07:47:16 crc kubenswrapper[4929]: E1122 07:47:16.355950 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-cell0-conductor-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified\\\"\"" pod="openstack/nova-cell0-conductor-db-sync-s4kv5" podUID="f3c0d097-15a2-494f-a1cc-2bde685efa87" Nov 22 07:47:18 crc kubenswrapper[4929]: I1122 07:47:18.594426 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:47:18 crc kubenswrapper[4929]: I1122 07:47:18.594891 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:47:20 crc kubenswrapper[4929]: I1122 07:47:20.976551 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:47:20 crc kubenswrapper[4929]: I1122 07:47:20.977201 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="17608323-2c28-467e-83ea-de232b2d1211" containerName="ceilometer-central-agent" containerID="cri-o://429ff505f482c792bf35375da3a8edbe12ebea885cb2f0b54d7cc0fb2a066ff9" gracePeriod=30 Nov 22 07:47:20 crc kubenswrapper[4929]: I1122 07:47:20.977302 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="17608323-2c28-467e-83ea-de232b2d1211" containerName="sg-core" containerID="cri-o://ce765c327170f06830c3fbb8b16cc30294f09b7ab5ce6a9d225f06eee474791d" gracePeriod=30 Nov 22 07:47:20 crc kubenswrapper[4929]: I1122 07:47:20.977400 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="17608323-2c28-467e-83ea-de232b2d1211" containerName="proxy-httpd" containerID="cri-o://80a847b3ff021d516a0875b80f39321527f4115d9a6c0d4427b14168f95eed2b" gracePeriod=30 Nov 22 07:47:20 crc kubenswrapper[4929]: I1122 07:47:20.978071 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="17608323-2c28-467e-83ea-de232b2d1211" containerName="ceilometer-notification-agent" containerID="cri-o://48d94726a38244705f0514da864b6928c691ad7e10844f5a0a612c5a6186bec6" gracePeriod=30 Nov 22 07:47:20 crc kubenswrapper[4929]: I1122 07:47:20.996347 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 07:47:20 crc kubenswrapper[4929]: I1122 07:47:20.996555 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="18c31ba0-cf68-45bf-87da-d04ab3bd8b21" containerName="kube-state-metrics" containerID="cri-o://62d3aa065282e07332a4dfb81e01b075ecde2a4d56c10e06cf2c5f6c411f4ec6" gracePeriod=30 Nov 22 07:47:21 crc kubenswrapper[4929]: I1122 07:47:21.399830 4929 generic.go:334] "Generic (PLEG): container finished" podID="17608323-2c28-467e-83ea-de232b2d1211" containerID="80a847b3ff021d516a0875b80f39321527f4115d9a6c0d4427b14168f95eed2b" exitCode=0 Nov 22 07:47:21 crc kubenswrapper[4929]: I1122 07:47:21.399860 4929 generic.go:334] "Generic (PLEG): container finished" podID="17608323-2c28-467e-83ea-de232b2d1211" containerID="ce765c327170f06830c3fbb8b16cc30294f09b7ab5ce6a9d225f06eee474791d" exitCode=2 Nov 22 07:47:21 crc kubenswrapper[4929]: I1122 07:47:21.399905 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17608323-2c28-467e-83ea-de232b2d1211","Type":"ContainerDied","Data":"80a847b3ff021d516a0875b80f39321527f4115d9a6c0d4427b14168f95eed2b"} Nov 22 07:47:21 crc kubenswrapper[4929]: I1122 07:47:21.399953 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17608323-2c28-467e-83ea-de232b2d1211","Type":"ContainerDied","Data":"ce765c327170f06830c3fbb8b16cc30294f09b7ab5ce6a9d225f06eee474791d"} Nov 22 07:47:21 crc kubenswrapper[4929]: I1122 07:47:21.406332 4929 generic.go:334] "Generic (PLEG): container finished" podID="18c31ba0-cf68-45bf-87da-d04ab3bd8b21" containerID="62d3aa065282e07332a4dfb81e01b075ecde2a4d56c10e06cf2c5f6c411f4ec6" exitCode=2 Nov 22 07:47:21 crc kubenswrapper[4929]: 
I1122 07:47:21.406368 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"18c31ba0-cf68-45bf-87da-d04ab3bd8b21","Type":"ContainerDied","Data":"62d3aa065282e07332a4dfb81e01b075ecde2a4d56c10e06cf2c5f6c411f4ec6"} Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.132076 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.247084 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s97vp\" (UniqueName: \"kubernetes.io/projected/18c31ba0-cf68-45bf-87da-d04ab3bd8b21-kube-api-access-s97vp\") pod \"18c31ba0-cf68-45bf-87da-d04ab3bd8b21\" (UID: \"18c31ba0-cf68-45bf-87da-d04ab3bd8b21\") " Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.254121 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18c31ba0-cf68-45bf-87da-d04ab3bd8b21-kube-api-access-s97vp" (OuterVolumeSpecName: "kube-api-access-s97vp") pod "18c31ba0-cf68-45bf-87da-d04ab3bd8b21" (UID: "18c31ba0-cf68-45bf-87da-d04ab3bd8b21"). InnerVolumeSpecName "kube-api-access-s97vp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.350410 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s97vp\" (UniqueName: \"kubernetes.io/projected/18c31ba0-cf68-45bf-87da-d04ab3bd8b21-kube-api-access-s97vp\") on node \"crc\" DevicePath \"\"" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.422871 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"18c31ba0-cf68-45bf-87da-d04ab3bd8b21","Type":"ContainerDied","Data":"a8a26f7fcf573a6fc6367957223b8ecfcd4caf2c7823262e3443ec785e6c03e3"} Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.422955 4929 scope.go:117] "RemoveContainer" containerID="62d3aa065282e07332a4dfb81e01b075ecde2a4d56c10e06cf2c5f6c411f4ec6" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.423161 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.430503 4929 generic.go:334] "Generic (PLEG): container finished" podID="17608323-2c28-467e-83ea-de232b2d1211" containerID="48d94726a38244705f0514da864b6928c691ad7e10844f5a0a612c5a6186bec6" exitCode=0 Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.430537 4929 generic.go:334] "Generic (PLEG): container finished" podID="17608323-2c28-467e-83ea-de232b2d1211" containerID="429ff505f482c792bf35375da3a8edbe12ebea885cb2f0b54d7cc0fb2a066ff9" exitCode=0 Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.430558 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17608323-2c28-467e-83ea-de232b2d1211","Type":"ContainerDied","Data":"48d94726a38244705f0514da864b6928c691ad7e10844f5a0a612c5a6186bec6"} Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.430586 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17608323-2c28-467e-83ea-de232b2d1211","Type":"ContainerDied","Data":"429ff505f482c792bf35375da3a8edbe12ebea885cb2f0b54d7cc0fb2a066ff9"} Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.533947 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.544293 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.564269 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 07:47:22 crc kubenswrapper[4929]: E1122 07:47:22.565110 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18c31ba0-cf68-45bf-87da-d04ab3bd8b21" containerName="kube-state-metrics" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.565154 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="18c31ba0-cf68-45bf-87da-d04ab3bd8b21" containerName="kube-state-metrics" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.565673 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="18c31ba0-cf68-45bf-87da-d04ab3bd8b21" containerName="kube-state-metrics" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.567592 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.579328 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.579492 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.586491 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.678174 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/25fc7418-569c-4102-9186-f21d540d4a67-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"25fc7418-569c-4102-9186-f21d540d4a67\") " pod="openstack/kube-state-metrics-0" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.678271 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/25fc7418-569c-4102-9186-f21d540d4a67-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"25fc7418-569c-4102-9186-f21d540d4a67\") " pod="openstack/kube-state-metrics-0" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.678324 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25fc7418-569c-4102-9186-f21d540d4a67-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"25fc7418-569c-4102-9186-f21d540d4a67\") " pod="openstack/kube-state-metrics-0" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.678359 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbb4h\" (UniqueName: \"kubernetes.io/projected/25fc7418-569c-4102-9186-f21d540d4a67-kube-api-access-dbb4h\") pod \"kube-state-metrics-0\" (UID: \"25fc7418-569c-4102-9186-f21d540d4a67\") " pod="openstack/kube-state-metrics-0" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.779973 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/25fc7418-569c-4102-9186-f21d540d4a67-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"25fc7418-569c-4102-9186-f21d540d4a67\") " pod="openstack/kube-state-metrics-0" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.780057 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/25fc7418-569c-4102-9186-f21d540d4a67-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"25fc7418-569c-4102-9186-f21d540d4a67\") " pod="openstack/kube-state-metrics-0" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.780114 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25fc7418-569c-4102-9186-f21d540d4a67-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"25fc7418-569c-4102-9186-f21d540d4a67\") " pod="openstack/kube-state-metrics-0" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.780151 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbb4h\" 
(UniqueName: \"kubernetes.io/projected/25fc7418-569c-4102-9186-f21d540d4a67-kube-api-access-dbb4h\") pod \"kube-state-metrics-0\" (UID: \"25fc7418-569c-4102-9186-f21d540d4a67\") " pod="openstack/kube-state-metrics-0" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.784635 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/25fc7418-569c-4102-9186-f21d540d4a67-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"25fc7418-569c-4102-9186-f21d540d4a67\") " pod="openstack/kube-state-metrics-0" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.785089 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/25fc7418-569c-4102-9186-f21d540d4a67-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"25fc7418-569c-4102-9186-f21d540d4a67\") " pod="openstack/kube-state-metrics-0" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.797599 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25fc7418-569c-4102-9186-f21d540d4a67-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"25fc7418-569c-4102-9186-f21d540d4a67\") " pod="openstack/kube-state-metrics-0" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.803501 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbb4h\" (UniqueName: \"kubernetes.io/projected/25fc7418-569c-4102-9186-f21d540d4a67-kube-api-access-dbb4h\") pod \"kube-state-metrics-0\" (UID: \"25fc7418-569c-4102-9186-f21d540d4a67\") " pod="openstack/kube-state-metrics-0" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.890490 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 07:47:22 crc kubenswrapper[4929]: I1122 07:47:22.975444 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.087246 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-combined-ca-bundle\") pod \"17608323-2c28-467e-83ea-de232b2d1211\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.087401 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17608323-2c28-467e-83ea-de232b2d1211-log-httpd\") pod \"17608323-2c28-467e-83ea-de232b2d1211\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.087436 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-sg-core-conf-yaml\") pod \"17608323-2c28-467e-83ea-de232b2d1211\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.087530 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17608323-2c28-467e-83ea-de232b2d1211-run-httpd\") pod \"17608323-2c28-467e-83ea-de232b2d1211\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.087595 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jmz6q\" (UniqueName: \"kubernetes.io/projected/17608323-2c28-467e-83ea-de232b2d1211-kube-api-access-jmz6q\") pod \"17608323-2c28-467e-83ea-de232b2d1211\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.087771 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-config-data\") pod \"17608323-2c28-467e-83ea-de232b2d1211\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.087825 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-scripts\") pod \"17608323-2c28-467e-83ea-de232b2d1211\" (UID: \"17608323-2c28-467e-83ea-de232b2d1211\") " Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.088231 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17608323-2c28-467e-83ea-de232b2d1211-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "17608323-2c28-467e-83ea-de232b2d1211" (UID: "17608323-2c28-467e-83ea-de232b2d1211"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.088278 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17608323-2c28-467e-83ea-de232b2d1211-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "17608323-2c28-467e-83ea-de232b2d1211" (UID: "17608323-2c28-467e-83ea-de232b2d1211"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.088516 4929 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17608323-2c28-467e-83ea-de232b2d1211-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.088545 4929 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17608323-2c28-467e-83ea-de232b2d1211-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.095663 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-scripts" (OuterVolumeSpecName: "scripts") pod "17608323-2c28-467e-83ea-de232b2d1211" (UID: "17608323-2c28-467e-83ea-de232b2d1211"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.110600 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17608323-2c28-467e-83ea-de232b2d1211-kube-api-access-jmz6q" (OuterVolumeSpecName: "kube-api-access-jmz6q") pod "17608323-2c28-467e-83ea-de232b2d1211" (UID: "17608323-2c28-467e-83ea-de232b2d1211"). InnerVolumeSpecName "kube-api-access-jmz6q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.130088 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "17608323-2c28-467e-83ea-de232b2d1211" (UID: "17608323-2c28-467e-83ea-de232b2d1211"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.168510 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "17608323-2c28-467e-83ea-de232b2d1211" (UID: "17608323-2c28-467e-83ea-de232b2d1211"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.190023 4929 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.190055 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jmz6q\" (UniqueName: \"kubernetes.io/projected/17608323-2c28-467e-83ea-de232b2d1211-kube-api-access-jmz6q\") on node \"crc\" DevicePath \"\"" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.190067 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.190076 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.210862 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-config-data" (OuterVolumeSpecName: "config-data") pod "17608323-2c28-467e-83ea-de232b2d1211" (UID: "17608323-2c28-467e-83ea-de232b2d1211"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.291128 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17608323-2c28-467e-83ea-de232b2d1211-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.364197 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.442496 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17608323-2c28-467e-83ea-de232b2d1211","Type":"ContainerDied","Data":"0b298af4f419927c92362f57c2ec74294a441906f952d030a286a727cb573f17"} Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.442564 4929 scope.go:117] "RemoveContainer" containerID="80a847b3ff021d516a0875b80f39321527f4115d9a6c0d4427b14168f95eed2b" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.442716 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.446540 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"25fc7418-569c-4102-9186-f21d540d4a67","Type":"ContainerStarted","Data":"8ff9436276f00f7030fae3b18b5ef9e82f175bbc218742d8faedb9a3c0074e26"} Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.482557 4929 scope.go:117] "RemoveContainer" containerID="ce765c327170f06830c3fbb8b16cc30294f09b7ab5ce6a9d225f06eee474791d" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.488791 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.501779 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.516096 4929 scope.go:117] "RemoveContainer" containerID="48d94726a38244705f0514da864b6928c691ad7e10844f5a0a612c5a6186bec6" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.517159 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:47:23 crc kubenswrapper[4929]: E1122 07:47:23.517563 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17608323-2c28-467e-83ea-de232b2d1211" containerName="sg-core" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.517587 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="17608323-2c28-467e-83ea-de232b2d1211" containerName="sg-core" Nov 22 07:47:23 crc kubenswrapper[4929]: E1122 07:47:23.517619 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17608323-2c28-467e-83ea-de232b2d1211" containerName="ceilometer-central-agent" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.517627 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="17608323-2c28-467e-83ea-de232b2d1211" containerName="ceilometer-central-agent" Nov 22 07:47:23 crc kubenswrapper[4929]: E1122 07:47:23.517645 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17608323-2c28-467e-83ea-de232b2d1211" containerName="proxy-httpd" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.517651 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="17608323-2c28-467e-83ea-de232b2d1211" containerName="proxy-httpd" Nov 22 07:47:23 crc kubenswrapper[4929]: E1122 07:47:23.517661 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17608323-2c28-467e-83ea-de232b2d1211" containerName="ceilometer-notification-agent" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.517667 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="17608323-2c28-467e-83ea-de232b2d1211" containerName="ceilometer-notification-agent" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.517836 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="17608323-2c28-467e-83ea-de232b2d1211" containerName="sg-core" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.517852 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="17608323-2c28-467e-83ea-de232b2d1211" containerName="ceilometer-notification-agent" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.517865 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="17608323-2c28-467e-83ea-de232b2d1211" containerName="proxy-httpd" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.517875 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="17608323-2c28-467e-83ea-de232b2d1211" 
containerName="ceilometer-central-agent" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.519542 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.522631 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.522815 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.550123 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.550626 4929 scope.go:117] "RemoveContainer" containerID="429ff505f482c792bf35375da3a8edbe12ebea885cb2f0b54d7cc0fb2a066ff9" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.603357 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-log-httpd\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.603478 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.603532 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ls6gl\" (UniqueName: \"kubernetes.io/projected/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-kube-api-access-ls6gl\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.603581 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.603638 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-scripts\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.603661 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-config-data\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.603694 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-run-httpd\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.705778 4929 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.706406 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ls6gl\" (UniqueName: \"kubernetes.io/projected/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-kube-api-access-ls6gl\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.706588 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.706765 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-scripts\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.706998 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-config-data\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.707149 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-run-httpd\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.707271 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-log-httpd\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.707610 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-run-httpd\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.709613 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-log-httpd\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.711920 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.712504 4929 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-scripts\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.719182 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-config-data\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.720450 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.724298 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ls6gl\" (UniqueName: \"kubernetes.io/projected/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-kube-api-access-ls6gl\") pod \"ceilometer-0\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.884453 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.960784 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17608323-2c28-467e-83ea-de232b2d1211" path="/var/lib/kubelet/pods/17608323-2c28-467e-83ea-de232b2d1211/volumes" Nov 22 07:47:23 crc kubenswrapper[4929]: I1122 07:47:23.961951 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18c31ba0-cf68-45bf-87da-d04ab3bd8b21" path="/var/lib/kubelet/pods/18c31ba0-cf68-45bf-87da-d04ab3bd8b21/volumes" Nov 22 07:47:24 crc kubenswrapper[4929]: I1122 07:47:24.331928 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:47:24 crc kubenswrapper[4929]: I1122 07:47:24.457612 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1a4aa47c-57a9-4aee-86c3-730bd4cec70e","Type":"ContainerStarted","Data":"6b8dbb4a8039e9621f0f76cd2f73949663fdce7634416224331cb17c61aa2bd5"} Nov 22 07:47:24 crc kubenswrapper[4929]: I1122 07:47:24.714056 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:47:26 crc kubenswrapper[4929]: I1122 07:47:26.478942 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"25fc7418-569c-4102-9186-f21d540d4a67","Type":"ContainerStarted","Data":"21ba3de4ba56b1fbc5d1340d22c9f211b9dbb00768a2c2536f32c48e2d6899c9"} Nov 22 07:47:26 crc kubenswrapper[4929]: I1122 07:47:26.479350 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 22 07:47:26 crc kubenswrapper[4929]: I1122 07:47:26.493200 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.027364245 podStartE2EDuration="4.493182032s" podCreationTimestamp="2025-11-22 07:47:22 +0000 UTC" firstStartedPulling="2025-11-22 07:47:23.372681307 +0000 UTC m=+2180.482135320" lastFinishedPulling="2025-11-22 07:47:25.838499104 +0000 UTC m=+2182.947953107" observedRunningTime="2025-11-22 07:47:26.491913481 +0000 UTC m=+2183.601367494" 
watchObservedRunningTime="2025-11-22 07:47:26.493182032 +0000 UTC m=+2183.602636065" Nov 22 07:47:27 crc kubenswrapper[4929]: I1122 07:47:27.488097 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1a4aa47c-57a9-4aee-86c3-730bd4cec70e","Type":"ContainerStarted","Data":"06e2340ce4822facc26d29496af654746b5e112297453cf7d399121d4a2908e9"} Nov 22 07:47:30 crc kubenswrapper[4929]: I1122 07:47:30.515085 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1a4aa47c-57a9-4aee-86c3-730bd4cec70e","Type":"ContainerStarted","Data":"2bc8c4ff15f51a10b940e5a1164979f25046ff25f3e199bf3f8565674b71ba91"} Nov 22 07:47:33 crc kubenswrapper[4929]: I1122 07:47:33.056116 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 22 07:47:33 crc kubenswrapper[4929]: I1122 07:47:33.556970 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-s4kv5" event={"ID":"f3c0d097-15a2-494f-a1cc-2bde685efa87","Type":"ContainerStarted","Data":"4a0a38ab0e02d62955d90b79852d2db15e898b47138169421d7c158ef27aa7be"} Nov 22 07:47:33 crc kubenswrapper[4929]: I1122 07:47:33.579963 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-s4kv5" podStartSLOduration=3.6935616700000002 podStartE2EDuration="40.579945241s" podCreationTimestamp="2025-11-22 07:46:53 +0000 UTC" firstStartedPulling="2025-11-22 07:46:55.121391223 +0000 UTC m=+2152.230845246" lastFinishedPulling="2025-11-22 07:47:32.007774794 +0000 UTC m=+2189.117228817" observedRunningTime="2025-11-22 07:47:33.572971342 +0000 UTC m=+2190.682425355" watchObservedRunningTime="2025-11-22 07:47:33.579945241 +0000 UTC m=+2190.689399254" Nov 22 07:47:37 crc kubenswrapper[4929]: I1122 07:47:37.589443 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1a4aa47c-57a9-4aee-86c3-730bd4cec70e","Type":"ContainerStarted","Data":"a73b7c14efb169d6eee0068377ddecd6023afd07723193f644d47e0dc8f6bd98"} Nov 22 07:47:48 crc kubenswrapper[4929]: I1122 07:47:48.594782 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:47:48 crc kubenswrapper[4929]: I1122 07:47:48.595367 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:48:04 crc kubenswrapper[4929]: I1122 07:48:04.901965 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1a4aa47c-57a9-4aee-86c3-730bd4cec70e","Type":"ContainerStarted","Data":"eca8946e47611bb74098a5c93e945c189a14982f0d46fc39d53e020e600538c3"} Nov 22 07:48:04 crc kubenswrapper[4929]: I1122 07:48:04.902649 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 07:48:04 crc kubenswrapper[4929]: I1122 07:48:04.902180 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" 
containerName="sg-core" containerID="cri-o://a73b7c14efb169d6eee0068377ddecd6023afd07723193f644d47e0dc8f6bd98" gracePeriod=30 Nov 22 07:48:04 crc kubenswrapper[4929]: I1122 07:48:04.902117 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" containerName="ceilometer-central-agent" containerID="cri-o://06e2340ce4822facc26d29496af654746b5e112297453cf7d399121d4a2908e9" gracePeriod=30 Nov 22 07:48:04 crc kubenswrapper[4929]: I1122 07:48:04.902261 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" containerName="ceilometer-notification-agent" containerID="cri-o://2bc8c4ff15f51a10b940e5a1164979f25046ff25f3e199bf3f8565674b71ba91" gracePeriod=30 Nov 22 07:48:04 crc kubenswrapper[4929]: I1122 07:48:04.902243 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" containerName="proxy-httpd" containerID="cri-o://eca8946e47611bb74098a5c93e945c189a14982f0d46fc39d53e020e600538c3" gracePeriod=30 Nov 22 07:48:04 crc kubenswrapper[4929]: I1122 07:48:04.921349 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.6503426450000003 podStartE2EDuration="41.921333356s" podCreationTimestamp="2025-11-22 07:47:23 +0000 UTC" firstStartedPulling="2025-11-22 07:47:25.079887966 +0000 UTC m=+2182.189341979" lastFinishedPulling="2025-11-22 07:48:04.350878677 +0000 UTC m=+2221.460332690" observedRunningTime="2025-11-22 07:48:04.920832554 +0000 UTC m=+2222.030286577" watchObservedRunningTime="2025-11-22 07:48:04.921333356 +0000 UTC m=+2222.030787369" Nov 22 07:48:05 crc kubenswrapper[4929]: I1122 07:48:05.911159 4929 generic.go:334] "Generic (PLEG): container finished" podID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" containerID="eca8946e47611bb74098a5c93e945c189a14982f0d46fc39d53e020e600538c3" exitCode=1 Nov 22 07:48:05 crc kubenswrapper[4929]: I1122 07:48:05.911461 4929 generic.go:334] "Generic (PLEG): container finished" podID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" containerID="a73b7c14efb169d6eee0068377ddecd6023afd07723193f644d47e0dc8f6bd98" exitCode=2 Nov 22 07:48:05 crc kubenswrapper[4929]: I1122 07:48:05.911471 4929 generic.go:334] "Generic (PLEG): container finished" podID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" containerID="06e2340ce4822facc26d29496af654746b5e112297453cf7d399121d4a2908e9" exitCode=0 Nov 22 07:48:05 crc kubenswrapper[4929]: I1122 07:48:05.911269 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1a4aa47c-57a9-4aee-86c3-730bd4cec70e","Type":"ContainerDied","Data":"eca8946e47611bb74098a5c93e945c189a14982f0d46fc39d53e020e600538c3"} Nov 22 07:48:05 crc kubenswrapper[4929]: I1122 07:48:05.911508 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1a4aa47c-57a9-4aee-86c3-730bd4cec70e","Type":"ContainerDied","Data":"a73b7c14efb169d6eee0068377ddecd6023afd07723193f644d47e0dc8f6bd98"} Nov 22 07:48:05 crc kubenswrapper[4929]: I1122 07:48:05.911521 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1a4aa47c-57a9-4aee-86c3-730bd4cec70e","Type":"ContainerDied","Data":"06e2340ce4822facc26d29496af654746b5e112297453cf7d399121d4a2908e9"} Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.367376 4929 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.478903 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-combined-ca-bundle\") pod \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.478964 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-config-data\") pod \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.479007 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-scripts\") pod \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.479052 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-log-httpd\") pod \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.479081 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-run-httpd\") pod \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.479109 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-sg-core-conf-yaml\") pod \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.479153 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ls6gl\" (UniqueName: \"kubernetes.io/projected/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-kube-api-access-ls6gl\") pod \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\" (UID: \"1a4aa47c-57a9-4aee-86c3-730bd4cec70e\") " Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.480708 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1a4aa47c-57a9-4aee-86c3-730bd4cec70e" (UID: "1a4aa47c-57a9-4aee-86c3-730bd4cec70e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.480794 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1a4aa47c-57a9-4aee-86c3-730bd4cec70e" (UID: "1a4aa47c-57a9-4aee-86c3-730bd4cec70e"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.487528 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-scripts" (OuterVolumeSpecName: "scripts") pod "1a4aa47c-57a9-4aee-86c3-730bd4cec70e" (UID: "1a4aa47c-57a9-4aee-86c3-730bd4cec70e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.490395 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-kube-api-access-ls6gl" (OuterVolumeSpecName: "kube-api-access-ls6gl") pod "1a4aa47c-57a9-4aee-86c3-730bd4cec70e" (UID: "1a4aa47c-57a9-4aee-86c3-730bd4cec70e"). InnerVolumeSpecName "kube-api-access-ls6gl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.520403 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1a4aa47c-57a9-4aee-86c3-730bd4cec70e" (UID: "1a4aa47c-57a9-4aee-86c3-730bd4cec70e"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.566202 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1a4aa47c-57a9-4aee-86c3-730bd4cec70e" (UID: "1a4aa47c-57a9-4aee-86c3-730bd4cec70e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.581237 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.581266 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.581275 4929 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.581285 4929 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.581296 4929 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.581307 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ls6gl\" (UniqueName: \"kubernetes.io/projected/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-kube-api-access-ls6gl\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.582550 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-config-data" (OuterVolumeSpecName: "config-data") pod "1a4aa47c-57a9-4aee-86c3-730bd4cec70e" (UID: "1a4aa47c-57a9-4aee-86c3-730bd4cec70e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.682680 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a4aa47c-57a9-4aee-86c3-730bd4cec70e-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.925545 4929 generic.go:334] "Generic (PLEG): container finished" podID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" containerID="2bc8c4ff15f51a10b940e5a1164979f25046ff25f3e199bf3f8565674b71ba91" exitCode=0 Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.925588 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1a4aa47c-57a9-4aee-86c3-730bd4cec70e","Type":"ContainerDied","Data":"2bc8c4ff15f51a10b940e5a1164979f25046ff25f3e199bf3f8565674b71ba91"} Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.925621 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.925647 4929 scope.go:117] "RemoveContainer" containerID="eca8946e47611bb74098a5c93e945c189a14982f0d46fc39d53e020e600538c3" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.925634 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1a4aa47c-57a9-4aee-86c3-730bd4cec70e","Type":"ContainerDied","Data":"6b8dbb4a8039e9621f0f76cd2f73949663fdce7634416224331cb17c61aa2bd5"} Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.949402 4929 scope.go:117] "RemoveContainer" containerID="a73b7c14efb169d6eee0068377ddecd6023afd07723193f644d47e0dc8f6bd98" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.968081 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.972140 4929 scope.go:117] "RemoveContainer" containerID="2bc8c4ff15f51a10b940e5a1164979f25046ff25f3e199bf3f8565674b71ba91" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.983194 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.996943 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:48:06 crc kubenswrapper[4929]: E1122 07:48:06.997383 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" containerName="ceilometer-central-agent" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.997400 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" containerName="ceilometer-central-agent" Nov 22 07:48:06 crc kubenswrapper[4929]: E1122 07:48:06.997432 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" containerName="proxy-httpd" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.997440 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" containerName="proxy-httpd" Nov 22 07:48:06 crc kubenswrapper[4929]: E1122 07:48:06.997460 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" containerName="sg-core" 
Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.997469 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" containerName="sg-core" Nov 22 07:48:06 crc kubenswrapper[4929]: E1122 07:48:06.997486 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" containerName="ceilometer-notification-agent" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.997494 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" containerName="ceilometer-notification-agent" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.997726 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" containerName="ceilometer-central-agent" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.997750 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" containerName="sg-core" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.997782 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" containerName="ceilometer-notification-agent" Nov 22 07:48:06 crc kubenswrapper[4929]: I1122 07:48:06.997801 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" containerName="proxy-httpd" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.001017 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.002277 4929 scope.go:117] "RemoveContainer" containerID="06e2340ce4822facc26d29496af654746b5e112297453cf7d399121d4a2908e9" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.005099 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.005161 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.005182 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.010088 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.030388 4929 scope.go:117] "RemoveContainer" containerID="eca8946e47611bb74098a5c93e945c189a14982f0d46fc39d53e020e600538c3" Nov 22 07:48:07 crc kubenswrapper[4929]: E1122 07:48:07.035000 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eca8946e47611bb74098a5c93e945c189a14982f0d46fc39d53e020e600538c3\": container with ID starting with eca8946e47611bb74098a5c93e945c189a14982f0d46fc39d53e020e600538c3 not found: ID does not exist" containerID="eca8946e47611bb74098a5c93e945c189a14982f0d46fc39d53e020e600538c3" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.035081 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eca8946e47611bb74098a5c93e945c189a14982f0d46fc39d53e020e600538c3"} err="failed to get container status \"eca8946e47611bb74098a5c93e945c189a14982f0d46fc39d53e020e600538c3\": rpc error: code = NotFound desc = could not find container \"eca8946e47611bb74098a5c93e945c189a14982f0d46fc39d53e020e600538c3\": container with 
ID starting with eca8946e47611bb74098a5c93e945c189a14982f0d46fc39d53e020e600538c3 not found: ID does not exist" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.035113 4929 scope.go:117] "RemoveContainer" containerID="a73b7c14efb169d6eee0068377ddecd6023afd07723193f644d47e0dc8f6bd98" Nov 22 07:48:07 crc kubenswrapper[4929]: E1122 07:48:07.036782 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a73b7c14efb169d6eee0068377ddecd6023afd07723193f644d47e0dc8f6bd98\": container with ID starting with a73b7c14efb169d6eee0068377ddecd6023afd07723193f644d47e0dc8f6bd98 not found: ID does not exist" containerID="a73b7c14efb169d6eee0068377ddecd6023afd07723193f644d47e0dc8f6bd98" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.036827 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a73b7c14efb169d6eee0068377ddecd6023afd07723193f644d47e0dc8f6bd98"} err="failed to get container status \"a73b7c14efb169d6eee0068377ddecd6023afd07723193f644d47e0dc8f6bd98\": rpc error: code = NotFound desc = could not find container \"a73b7c14efb169d6eee0068377ddecd6023afd07723193f644d47e0dc8f6bd98\": container with ID starting with a73b7c14efb169d6eee0068377ddecd6023afd07723193f644d47e0dc8f6bd98 not found: ID does not exist" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.036856 4929 scope.go:117] "RemoveContainer" containerID="2bc8c4ff15f51a10b940e5a1164979f25046ff25f3e199bf3f8565674b71ba91" Nov 22 07:48:07 crc kubenswrapper[4929]: E1122 07:48:07.037264 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2bc8c4ff15f51a10b940e5a1164979f25046ff25f3e199bf3f8565674b71ba91\": container with ID starting with 2bc8c4ff15f51a10b940e5a1164979f25046ff25f3e199bf3f8565674b71ba91 not found: ID does not exist" containerID="2bc8c4ff15f51a10b940e5a1164979f25046ff25f3e199bf3f8565674b71ba91" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.037284 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2bc8c4ff15f51a10b940e5a1164979f25046ff25f3e199bf3f8565674b71ba91"} err="failed to get container status \"2bc8c4ff15f51a10b940e5a1164979f25046ff25f3e199bf3f8565674b71ba91\": rpc error: code = NotFound desc = could not find container \"2bc8c4ff15f51a10b940e5a1164979f25046ff25f3e199bf3f8565674b71ba91\": container with ID starting with 2bc8c4ff15f51a10b940e5a1164979f25046ff25f3e199bf3f8565674b71ba91 not found: ID does not exist" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.037296 4929 scope.go:117] "RemoveContainer" containerID="06e2340ce4822facc26d29496af654746b5e112297453cf7d399121d4a2908e9" Nov 22 07:48:07 crc kubenswrapper[4929]: E1122 07:48:07.037523 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06e2340ce4822facc26d29496af654746b5e112297453cf7d399121d4a2908e9\": container with ID starting with 06e2340ce4822facc26d29496af654746b5e112297453cf7d399121d4a2908e9 not found: ID does not exist" containerID="06e2340ce4822facc26d29496af654746b5e112297453cf7d399121d4a2908e9" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.037544 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06e2340ce4822facc26d29496af654746b5e112297453cf7d399121d4a2908e9"} err="failed to get container status 
\"06e2340ce4822facc26d29496af654746b5e112297453cf7d399121d4a2908e9\": rpc error: code = NotFound desc = could not find container \"06e2340ce4822facc26d29496af654746b5e112297453cf7d399121d4a2908e9\": container with ID starting with 06e2340ce4822facc26d29496af654746b5e112297453cf7d399121d4a2908e9 not found: ID does not exist" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.089921 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxxw8\" (UniqueName: \"kubernetes.io/projected/239d9737-2451-4bf3-85da-12386735de1a-kube-api-access-qxxw8\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.089972 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.090016 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.090054 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/239d9737-2451-4bf3-85da-12386735de1a-log-httpd\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.090085 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.090147 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-config-data\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.090206 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/239d9737-2451-4bf3-85da-12386735de1a-run-httpd\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.090236 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-scripts\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.191384 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/239d9737-2451-4bf3-85da-12386735de1a-run-httpd\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.191426 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-scripts\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.191482 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxxw8\" (UniqueName: \"kubernetes.io/projected/239d9737-2451-4bf3-85da-12386735de1a-kube-api-access-qxxw8\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.191511 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.191555 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.191579 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/239d9737-2451-4bf3-85da-12386735de1a-log-httpd\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.191634 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.191927 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/239d9737-2451-4bf3-85da-12386735de1a-run-httpd\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.192055 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/239d9737-2451-4bf3-85da-12386735de1a-log-httpd\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.192125 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-config-data\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.194897 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.195744 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-scripts\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.195809 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-config-data\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.196262 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.198907 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.211413 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxxw8\" (UniqueName: \"kubernetes.io/projected/239d9737-2451-4bf3-85da-12386735de1a-kube-api-access-qxxw8\") pod \"ceilometer-0\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") " pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.330585 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.798707 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:48:07 crc kubenswrapper[4929]: W1122 07:48:07.803051 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod239d9737_2451_4bf3_85da_12386735de1a.slice/crio-6a55c18106975eb4c8c28ad793c0e196659495f452242c6b332c8aba562761c6 WatchSource:0}: Error finding container 6a55c18106975eb4c8c28ad793c0e196659495f452242c6b332c8aba562761c6: Status 404 returned error can't find the container with id 6a55c18106975eb4c8c28ad793c0e196659495f452242c6b332c8aba562761c6 Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.939116 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"239d9737-2451-4bf3-85da-12386735de1a","Type":"ContainerStarted","Data":"6a55c18106975eb4c8c28ad793c0e196659495f452242c6b332c8aba562761c6"} Nov 22 07:48:07 crc kubenswrapper[4929]: I1122 07:48:07.961560 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a4aa47c-57a9-4aee-86c3-730bd4cec70e" path="/var/lib/kubelet/pods/1a4aa47c-57a9-4aee-86c3-730bd4cec70e/volumes" Nov 22 07:48:08 crc kubenswrapper[4929]: I1122 07:48:08.080187 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:48:09 crc kubenswrapper[4929]: I1122 07:48:09.968189 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"239d9737-2451-4bf3-85da-12386735de1a","Type":"ContainerStarted","Data":"0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed"} Nov 22 07:48:10 crc kubenswrapper[4929]: I1122 07:48:10.977689 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"239d9737-2451-4bf3-85da-12386735de1a","Type":"ContainerStarted","Data":"5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21"} Nov 22 07:48:13 crc kubenswrapper[4929]: I1122 07:48:13.006560 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"239d9737-2451-4bf3-85da-12386735de1a","Type":"ContainerStarted","Data":"67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e"} Nov 22 07:48:17 crc kubenswrapper[4929]: I1122 07:48:17.041279 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"239d9737-2451-4bf3-85da-12386735de1a","Type":"ContainerStarted","Data":"fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8"} Nov 22 07:48:17 crc kubenswrapper[4929]: I1122 07:48:17.041869 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 07:48:17 crc kubenswrapper[4929]: I1122 07:48:17.041490 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="239d9737-2451-4bf3-85da-12386735de1a" containerName="ceilometer-notification-agent" containerID="cri-o://5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21" gracePeriod=30 Nov 22 07:48:17 crc kubenswrapper[4929]: I1122 07:48:17.041402 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="239d9737-2451-4bf3-85da-12386735de1a" containerName="ceilometer-central-agent" containerID="cri-o://0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed" gracePeriod=30 Nov 22 07:48:17 crc kubenswrapper[4929]: 
I1122 07:48:17.041491 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="239d9737-2451-4bf3-85da-12386735de1a" containerName="sg-core" containerID="cri-o://67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e" gracePeriod=30 Nov 22 07:48:17 crc kubenswrapper[4929]: I1122 07:48:17.041565 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="239d9737-2451-4bf3-85da-12386735de1a" containerName="proxy-httpd" containerID="cri-o://fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8" gracePeriod=30 Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.031244 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.054248 4929 generic.go:334] "Generic (PLEG): container finished" podID="239d9737-2451-4bf3-85da-12386735de1a" containerID="fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8" exitCode=0 Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.054292 4929 generic.go:334] "Generic (PLEG): container finished" podID="239d9737-2451-4bf3-85da-12386735de1a" containerID="67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e" exitCode=2 Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.054304 4929 generic.go:334] "Generic (PLEG): container finished" podID="239d9737-2451-4bf3-85da-12386735de1a" containerID="5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21" exitCode=0 Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.054313 4929 generic.go:334] "Generic (PLEG): container finished" podID="239d9737-2451-4bf3-85da-12386735de1a" containerID="0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed" exitCode=0 Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.054335 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"239d9737-2451-4bf3-85da-12386735de1a","Type":"ContainerDied","Data":"fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8"} Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.054367 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"239d9737-2451-4bf3-85da-12386735de1a","Type":"ContainerDied","Data":"67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e"} Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.054383 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"239d9737-2451-4bf3-85da-12386735de1a","Type":"ContainerDied","Data":"5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21"} Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.054397 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"239d9737-2451-4bf3-85da-12386735de1a","Type":"ContainerDied","Data":"0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed"} Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.054411 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"239d9737-2451-4bf3-85da-12386735de1a","Type":"ContainerDied","Data":"6a55c18106975eb4c8c28ad793c0e196659495f452242c6b332c8aba562761c6"} Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.054430 4929 scope.go:117] "RemoveContainer" containerID="fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 
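
NOTE (editor): gracePeriod=30 in the "Killing container" lines is the pod's termination grace period: the runtime sends SIGTERM first and SIGKILL after 30 seconds. The same knob is what a client sets when deleting the pod; a minimal client-go sketch (kubeconfig loading simplified, namespace/name taken from the log):

    package main

    import (
        "context"
        "log"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
        if err != nil {
            log.Fatal(err)
        }
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            log.Fatal(err)
        }
        grace := int64(30) // SIGTERM now, SIGKILL after 30s
        if err := cs.CoreV1().Pods("openstack").Delete(context.Background(),
            "ceilometer-0", metav1.DeleteOptions{GracePeriodSeconds: &grace}); err != nil {
            log.Fatal(err)
        }
    }
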
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.054624 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.091804 4929 scope.go:117] "RemoveContainer" containerID="67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.097662 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxxw8\" (UniqueName: \"kubernetes.io/projected/239d9737-2451-4bf3-85da-12386735de1a-kube-api-access-qxxw8\") pod \"239d9737-2451-4bf3-85da-12386735de1a\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") "
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.097716 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-config-data\") pod \"239d9737-2451-4bf3-85da-12386735de1a\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") "
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.097761 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/239d9737-2451-4bf3-85da-12386735de1a-log-httpd\") pod \"239d9737-2451-4bf3-85da-12386735de1a\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") "
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.097796 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-sg-core-conf-yaml\") pod \"239d9737-2451-4bf3-85da-12386735de1a\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") "
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.097817 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-combined-ca-bundle\") pod \"239d9737-2451-4bf3-85da-12386735de1a\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") "
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.097870 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/239d9737-2451-4bf3-85da-12386735de1a-run-httpd\") pod \"239d9737-2451-4bf3-85da-12386735de1a\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") "
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.097893 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-scripts\") pod \"239d9737-2451-4bf3-85da-12386735de1a\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") "
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.097936 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-ceilometer-tls-certs\") pod \"239d9737-2451-4bf3-85da-12386735de1a\" (UID: \"239d9737-2451-4bf3-85da-12386735de1a\") "
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.098421 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/239d9737-2451-4bf3-85da-12386735de1a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "239d9737-2451-4bf3-85da-12386735de1a" (UID: "239d9737-2451-4bf3-85da-12386735de1a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.098764 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/239d9737-2451-4bf3-85da-12386735de1a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "239d9737-2451-4bf3-85da-12386735de1a" (UID: "239d9737-2451-4bf3-85da-12386735de1a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.099256 4929 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/239d9737-2451-4bf3-85da-12386735de1a-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.099283 4929 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/239d9737-2451-4bf3-85da-12386735de1a-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.106790 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-scripts" (OuterVolumeSpecName: "scripts") pod "239d9737-2451-4bf3-85da-12386735de1a" (UID: "239d9737-2451-4bf3-85da-12386735de1a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.107145 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/239d9737-2451-4bf3-85da-12386735de1a-kube-api-access-qxxw8" (OuterVolumeSpecName: "kube-api-access-qxxw8") pod "239d9737-2451-4bf3-85da-12386735de1a" (UID: "239d9737-2451-4bf3-85da-12386735de1a"). InnerVolumeSpecName "kube-api-access-qxxw8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.118558 4929 scope.go:117] "RemoveContainer" containerID="5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.126815 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "239d9737-2451-4bf3-85da-12386735de1a" (UID: "239d9737-2451-4bf3-85da-12386735de1a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.150384 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "239d9737-2451-4bf3-85da-12386735de1a" (UID: "239d9737-2451-4bf3-85da-12386735de1a"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.168564 4929 scope.go:117] "RemoveContainer" containerID="0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.191458 4929 scope.go:117] "RemoveContainer" containerID="fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8" Nov 22 07:48:18 crc kubenswrapper[4929]: E1122 07:48:18.192159 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8\": container with ID starting with fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8 not found: ID does not exist" containerID="fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.192253 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8"} err="failed to get container status \"fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8\": rpc error: code = NotFound desc = could not find container \"fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8\": container with ID starting with fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8 not found: ID does not exist" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.192276 4929 scope.go:117] "RemoveContainer" containerID="67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e" Nov 22 07:48:18 crc kubenswrapper[4929]: E1122 07:48:18.192566 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e\": container with ID starting with 67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e not found: ID does not exist" containerID="67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.192592 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e"} err="failed to get container status \"67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e\": rpc error: code = NotFound desc = could not find container \"67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e\": container with ID starting with 67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e not found: ID does not exist" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.192617 4929 scope.go:117] "RemoveContainer" containerID="5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21" Nov 22 07:48:18 crc kubenswrapper[4929]: E1122 07:48:18.192868 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21\": container with ID starting with 5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21 not found: ID does not exist" containerID="5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.192891 4929 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21"} err="failed to get container status \"5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21\": rpc error: code = NotFound desc = could not find container \"5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21\": container with ID starting with 5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21 not found: ID does not exist" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.192907 4929 scope.go:117] "RemoveContainer" containerID="0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed" Nov 22 07:48:18 crc kubenswrapper[4929]: E1122 07:48:18.193110 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed\": container with ID starting with 0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed not found: ID does not exist" containerID="0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.193136 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed"} err="failed to get container status \"0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed\": rpc error: code = NotFound desc = could not find container \"0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed\": container with ID starting with 0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed not found: ID does not exist" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.193154 4929 scope.go:117] "RemoveContainer" containerID="fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.193497 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8"} err="failed to get container status \"fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8\": rpc error: code = NotFound desc = could not find container \"fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8\": container with ID starting with fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8 not found: ID does not exist" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.193542 4929 scope.go:117] "RemoveContainer" containerID="67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.193786 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e"} err="failed to get container status \"67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e\": rpc error: code = NotFound desc = could not find container \"67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e\": container with ID starting with 67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e not found: ID does not exist" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.193814 4929 scope.go:117] "RemoveContainer" containerID="5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.194028 4929 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21"} err="failed to get container status \"5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21\": rpc error: code = NotFound desc = could not find container \"5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21\": container with ID starting with 5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21 not found: ID does not exist" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.194055 4929 scope.go:117] "RemoveContainer" containerID="0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.194284 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed"} err="failed to get container status \"0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed\": rpc error: code = NotFound desc = could not find container \"0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed\": container with ID starting with 0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed not found: ID does not exist" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.194310 4929 scope.go:117] "RemoveContainer" containerID="fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.194554 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8"} err="failed to get container status \"fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8\": rpc error: code = NotFound desc = could not find container \"fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8\": container with ID starting with fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8 not found: ID does not exist" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.194575 4929 scope.go:117] "RemoveContainer" containerID="67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.194828 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e"} err="failed to get container status \"67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e\": rpc error: code = NotFound desc = could not find container \"67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e\": container with ID starting with 67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e not found: ID does not exist" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.194855 4929 scope.go:117] "RemoveContainer" containerID="5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.195106 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21"} err="failed to get container status \"5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21\": rpc error: code = NotFound desc = could not find container \"5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21\": container with ID starting with 5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21 not found: ID does not exist" Nov 
22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.195130 4929 scope.go:117] "RemoveContainer" containerID="0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.195372 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed"} err="failed to get container status \"0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed\": rpc error: code = NotFound desc = could not find container \"0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed\": container with ID starting with 0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed not found: ID does not exist" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.195389 4929 scope.go:117] "RemoveContainer" containerID="fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.195587 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8"} err="failed to get container status \"fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8\": rpc error: code = NotFound desc = could not find container \"fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8\": container with ID starting with fbe289b34cf1bd7da9ce21984ba357aa2364f1b99f437b93e4fd8d832c0fd5f8 not found: ID does not exist" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.195603 4929 scope.go:117] "RemoveContainer" containerID="67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.195795 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "239d9737-2451-4bf3-85da-12386735de1a" (UID: "239d9737-2451-4bf3-85da-12386735de1a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.195813 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e"} err="failed to get container status \"67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e\": rpc error: code = NotFound desc = could not find container \"67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e\": container with ID starting with 67f8aadde626a3e4c27e8583ce2b29f3bd25bb718684bba78ee9a2dd9249a21e not found: ID does not exist" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.195880 4929 scope.go:117] "RemoveContainer" containerID="5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.196202 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21"} err="failed to get container status \"5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21\": rpc error: code = NotFound desc = could not find container \"5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21\": container with ID starting with 5fa98038df3e77431c5062f63ea699dd96660ffb004b55457ac7811f9884fc21 not found: ID does not exist" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.196253 4929 scope.go:117] "RemoveContainer" containerID="0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.196501 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed"} err="failed to get container status \"0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed\": rpc error: code = NotFound desc = could not find container \"0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed\": container with ID starting with 0b5746f48fb3c34d620a545d19f68ce3f5b76526d7351ca3e89c0dbb5db8faed not found: ID does not exist" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.200893 4929 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.200922 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxxw8\" (UniqueName: \"kubernetes.io/projected/239d9737-2451-4bf3-85da-12386735de1a-kube-api-access-qxxw8\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.200937 4929 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.200946 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.200954 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.215702 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-config-data" (OuterVolumeSpecName: "config-data") pod "239d9737-2451-4bf3-85da-12386735de1a" (UID: "239d9737-2451-4bf3-85da-12386735de1a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.303068 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/239d9737-2451-4bf3-85da-12386735de1a-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.427232 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.437589 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.458571 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:48:18 crc kubenswrapper[4929]: E1122 07:48:18.458931 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="239d9737-2451-4bf3-85da-12386735de1a" containerName="sg-core"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.458964 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="239d9737-2451-4bf3-85da-12386735de1a" containerName="sg-core"
Nov 22 07:48:18 crc kubenswrapper[4929]: E1122 07:48:18.458979 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="239d9737-2451-4bf3-85da-12386735de1a" containerName="ceilometer-notification-agent"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.458985 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="239d9737-2451-4bf3-85da-12386735de1a" containerName="ceilometer-notification-agent"
Nov 22 07:48:18 crc kubenswrapper[4929]: E1122 07:48:18.459011 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="239d9737-2451-4bf3-85da-12386735de1a" containerName="ceilometer-central-agent"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.459018 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="239d9737-2451-4bf3-85da-12386735de1a" containerName="ceilometer-central-agent"
Nov 22 07:48:18 crc kubenswrapper[4929]: E1122 07:48:18.459029 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="239d9737-2451-4bf3-85da-12386735de1a" containerName="proxy-httpd"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.459035 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="239d9737-2451-4bf3-85da-12386735de1a" containerName="proxy-httpd"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.459202 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="239d9737-2451-4bf3-85da-12386735de1a" containerName="ceilometer-central-agent"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.459232 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="239d9737-2451-4bf3-85da-12386735de1a" containerName="ceilometer-notification-agent"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.459242 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="239d9737-2451-4bf3-85da-12386735de1a" containerName="proxy-httpd"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.459253 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="239d9737-2451-4bf3-85da-12386735de1a" containerName="sg-core"
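
NOTE (editor): the SyncLoop DELETE -> REMOVE -> ADD triple means ceilometer-0 was deleted and immediately recreated under the same name but a new UID (239d9737-... is replaced by f9ec586b-... below). The cpu_manager/memory_manager "RemoveStaleState" lines are those managers dropping per-(UID, container) allocations for the dead UID so the new pod starts clean. A toy Go sketch of state keyed that way (map and values are illustrative):

    package main

    import "fmt"

    // Per-container resource state is keyed by pod UID, so a pod recreated
    // under the same name still gets fresh entries.
    type key struct{ podUID, container string }

    func main() {
        state := map[key]string{
            {"239d9737-2451-4bf3-85da-12386735de1a", "sg-core"}: "cpuset 0-3",
        }
        // Old UID gone from the API: remove its stale per-container state.
        for k := range state {
            if k.podUID == "239d9737-2451-4bf3-85da-12386735de1a" {
                delete(state, k)
            }
        }
        // The recreated pod registers under its new UID.
        state[key{"f9ec586b-1b51-4999-ae5e-5061c2f397c8", "sg-core"}] = "cpuset 0-3"
        fmt.Println(state)
    }
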
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.460873 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.463652 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.463652 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.464026 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.472232 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.519932 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9ec586b-1b51-4999-ae5e-5061c2f397c8-run-httpd\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.519986 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9ec586b-1b51-4999-ae5e-5061c2f397c8-log-httpd\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.520161 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z59kp\" (UniqueName: \"kubernetes.io/projected/f9ec586b-1b51-4999-ae5e-5061c2f397c8-kube-api-access-z59kp\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.520185 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-scripts\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.520226 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.520709 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-config-data\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.520817 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.521938 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.594167 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.594249 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.594284 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dssfx"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.595178 4929 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"da6298c3cdc9e063ad233859e80f7899c9d155db55931a04a3aa81f018fc8e1e"} pod="openshift-machine-config-operator/machine-config-daemon-dssfx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.595306 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" containerID="cri-o://da6298c3cdc9e063ad233859e80f7899c9d155db55931a04a3aa81f018fc8e1e" gracePeriod=600
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.623852 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z59kp\" (UniqueName: \"kubernetes.io/projected/f9ec586b-1b51-4999-ae5e-5061c2f397c8-kube-api-access-z59kp\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.624054 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-scripts\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.624167 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.624271 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-config-data\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.624409 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.624525 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.624623 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9ec586b-1b51-4999-ae5e-5061c2f397c8-run-httpd\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.624695 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9ec586b-1b51-4999-ae5e-5061c2f397c8-log-httpd\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.625274 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9ec586b-1b51-4999-ae5e-5061c2f397c8-run-httpd\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.625517 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9ec586b-1b51-4999-ae5e-5061c2f397c8-log-httpd\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.629433 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-scripts\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.630458 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.630579 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.633415 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-config-data\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.638460 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.645619 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z59kp\" (UniqueName: \"kubernetes.io/projected/f9ec586b-1b51-4999-ae5e-5061c2f397c8-kube-api-access-z59kp\") pod \"ceilometer-0\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") " pod="openstack/ceilometer-0"
Nov 22 07:48:18 crc kubenswrapper[4929]: I1122 07:48:18.776289 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 07:48:19 crc kubenswrapper[4929]: I1122 07:48:19.069833 4929 generic.go:334] "Generic (PLEG): container finished" podID="470531cb-120c-48d9-80e1-adf074cf3055" containerID="da6298c3cdc9e063ad233859e80f7899c9d155db55931a04a3aa81f018fc8e1e" exitCode=0
Nov 22 07:48:19 crc kubenswrapper[4929]: I1122 07:48:19.069956 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerDied","Data":"da6298c3cdc9e063ad233859e80f7899c9d155db55931a04a3aa81f018fc8e1e"}
Nov 22 07:48:19 crc kubenswrapper[4929]: I1122 07:48:19.070326 4929 scope.go:117] "RemoveContainer" containerID="bba21a971307d98b9eee0cfadf6f309a1d41feae65416265f9f340988390f9c1"
Nov 22 07:48:19 crc kubenswrapper[4929]: I1122 07:48:19.258004 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:48:19 crc kubenswrapper[4929]: W1122 07:48:19.261831 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf9ec586b_1b51_4999_ae5e_5061c2f397c8.slice/crio-126b55fdb7fb75a0ebdd7893fa57be53d2245e8b23f51d599af20132963d6e7a WatchSource:0}: Error finding container 126b55fdb7fb75a0ebdd7893fa57be53d2245e8b23f51d599af20132963d6e7a: Status 404 returned error can't find the container with id 126b55fdb7fb75a0ebdd7893fa57be53d2245e8b23f51d599af20132963d6e7a
Nov 22 07:48:19 crc kubenswrapper[4929]: I1122 07:48:19.959941 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="239d9737-2451-4bf3-85da-12386735de1a" path="/var/lib/kubelet/pods/239d9737-2451-4bf3-85da-12386735de1a/volumes"
Nov 22 07:48:20 crc kubenswrapper[4929]: I1122 07:48:20.081956 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f9ec586b-1b51-4999-ae5e-5061c2f397c8","Type":"ContainerStarted","Data":"126b55fdb7fb75a0ebdd7893fa57be53d2245e8b23f51d599af20132963d6e7a"}
Nov 22 07:48:20 crc kubenswrapper[4929]: I1122 07:48:20.085989 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c"}
Nov 22 07:48:21 crc kubenswrapper[4929]: I1122 07:48:21.098304 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f9ec586b-1b51-4999-ae5e-5061c2f397c8","Type":"ContainerStarted","Data":"bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee"}
Nov 22 07:48:21 crc kubenswrapper[4929]: I1122 07:48:21.098957 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f9ec586b-1b51-4999-ae5e-5061c2f397c8","Type":"ContainerStarted","Data":"b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c"}
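
NOTE (editor): the "SyncLoop (PLEG)" lines come from the Pod Lifecycle Event Generator: the kubelet relists runtime state and turns deltas into ContainerStarted/ContainerDied events that wake the sync loop. (The watch-event 404 just above appears to be cAdvisor racing the brand-new cgroup and resolves once the container is registered.) A toy Go model of PLEG-style events (types are illustrative, not kubelet source):

    package main

    import "fmt"

    // Runtime-state deltas become typed lifecycle events consumed by a loop.
    type plegEvent struct {
        podID string
        kind  string // "ContainerStarted" or "ContainerDied"
        data  string // container or sandbox ID
    }

    func main() {
        events := make(chan plegEvent, 1)
        events <- plegEvent{
            podID: "f9ec586b-1b51-4999-ae5e-5061c2f397c8",
            kind:  "ContainerStarted",
            data:  "126b55fdb7fb75a0ebdd7893fa57be53d2245e8b23f51d599af20132963d6e7a",
        }
        close(events)
        for e := range events {
            fmt.Printf("SyncLoop (PLEG): event for pod %s: %s %s\n", e.podID, e.kind, e.data)
        }
    }
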
pod="openstack/ceilometer-0" event={"ID":"f9ec586b-1b51-4999-ae5e-5061c2f397c8","Type":"ContainerStarted","Data":"b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c"} Nov 22 07:48:22 crc kubenswrapper[4929]: I1122 07:48:22.109837 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f9ec586b-1b51-4999-ae5e-5061c2f397c8","Type":"ContainerStarted","Data":"caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755"} Nov 22 07:48:24 crc kubenswrapper[4929]: I1122 07:48:24.852915 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:48:25 crc kubenswrapper[4929]: I1122 07:48:25.137038 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f9ec586b-1b51-4999-ae5e-5061c2f397c8","Type":"ContainerStarted","Data":"9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80"} Nov 22 07:48:25 crc kubenswrapper[4929]: I1122 07:48:25.137424 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 07:48:25 crc kubenswrapper[4929]: I1122 07:48:25.163837 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.213296584 podStartE2EDuration="7.163819678s" podCreationTimestamp="2025-11-22 07:48:18 +0000 UTC" firstStartedPulling="2025-11-22 07:48:19.264156615 +0000 UTC m=+2236.373610628" lastFinishedPulling="2025-11-22 07:48:24.214679709 +0000 UTC m=+2241.324133722" observedRunningTime="2025-11-22 07:48:25.162472794 +0000 UTC m=+2242.271926807" watchObservedRunningTime="2025-11-22 07:48:25.163819678 +0000 UTC m=+2242.273273691" Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.154826 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerName="ceilometer-central-agent" containerID="cri-o://b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c" gracePeriod=30 Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.154917 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerName="sg-core" containerID="cri-o://caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755" gracePeriod=30 Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.155039 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerName="proxy-httpd" containerID="cri-o://9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80" gracePeriod=30 Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.154938 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerName="ceilometer-notification-agent" containerID="cri-o://bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee" gracePeriod=30 Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.914006 4929 util.go:48] "No ready sandbox for pod can be found. 
Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.914006 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.978656 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9ec586b-1b51-4999-ae5e-5061c2f397c8-run-httpd\") pod \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") "
Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.978726 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-sg-core-conf-yaml\") pod \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") "
Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.978785 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9ec586b-1b51-4999-ae5e-5061c2f397c8-log-httpd\") pod \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") "
Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.978835 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-combined-ca-bundle\") pod \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") "
Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.978936 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-scripts\") pod \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") "
Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.978993 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-ceilometer-tls-certs\") pod \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") "
Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.979022 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-config-data\") pod \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") "
Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.979415 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9ec586b-1b51-4999-ae5e-5061c2f397c8-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f9ec586b-1b51-4999-ae5e-5061c2f397c8" (UID: "f9ec586b-1b51-4999-ae5e-5061c2f397c8"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.979123 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z59kp\" (UniqueName: \"kubernetes.io/projected/f9ec586b-1b51-4999-ae5e-5061c2f397c8-kube-api-access-z59kp\") pod \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\" (UID: \"f9ec586b-1b51-4999-ae5e-5061c2f397c8\") "
Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.979537 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9ec586b-1b51-4999-ae5e-5061c2f397c8-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f9ec586b-1b51-4999-ae5e-5061c2f397c8" (UID: "f9ec586b-1b51-4999-ae5e-5061c2f397c8"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.980437 4929 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9ec586b-1b51-4999-ae5e-5061c2f397c8-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.980463 4929 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f9ec586b-1b51-4999-ae5e-5061c2f397c8-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.986540 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9ec586b-1b51-4999-ae5e-5061c2f397c8-kube-api-access-z59kp" (OuterVolumeSpecName: "kube-api-access-z59kp") pod "f9ec586b-1b51-4999-ae5e-5061c2f397c8" (UID: "f9ec586b-1b51-4999-ae5e-5061c2f397c8"). InnerVolumeSpecName "kube-api-access-z59kp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:48:26 crc kubenswrapper[4929]: I1122 07:48:26.989800 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-scripts" (OuterVolumeSpecName: "scripts") pod "f9ec586b-1b51-4999-ae5e-5061c2f397c8" (UID: "f9ec586b-1b51-4999-ae5e-5061c2f397c8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.030558 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f9ec586b-1b51-4999-ae5e-5061c2f397c8" (UID: "f9ec586b-1b51-4999-ae5e-5061c2f397c8"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.062416 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "f9ec586b-1b51-4999-ae5e-5061c2f397c8" (UID: "f9ec586b-1b51-4999-ae5e-5061c2f397c8"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.094234 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.094273 4929 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.094291 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z59kp\" (UniqueName: \"kubernetes.io/projected/f9ec586b-1b51-4999-ae5e-5061c2f397c8-kube-api-access-z59kp\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.094305 4929 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.094317 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.111043 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-config-data" (OuterVolumeSpecName: "config-data") pod "f9ec586b-1b51-4999-ae5e-5061c2f397c8" (UID: "f9ec586b-1b51-4999-ae5e-5061c2f397c8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.165945 4929 generic.go:334] "Generic (PLEG): container finished" podID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerID="9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80" exitCode=0 Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.165977 4929 generic.go:334] "Generic (PLEG): container finished" podID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerID="caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755" exitCode=2 Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.165984 4929 generic.go:334] "Generic (PLEG): container finished" podID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerID="bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee" exitCode=0 Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.165992 4929 generic.go:334] "Generic (PLEG): container finished" podID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerID="b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c" exitCode=0 Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.166015 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f9ec586b-1b51-4999-ae5e-5061c2f397c8","Type":"ContainerDied","Data":"9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80"} Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.166036 4929 util.go:48] "No ready sandbox for pod can be found. 
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.166036 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.166051 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f9ec586b-1b51-4999-ae5e-5061c2f397c8","Type":"ContainerDied","Data":"caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755"}
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.166063 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f9ec586b-1b51-4999-ae5e-5061c2f397c8","Type":"ContainerDied","Data":"bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee"}
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.166073 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f9ec586b-1b51-4999-ae5e-5061c2f397c8","Type":"ContainerDied","Data":"b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c"}
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.166082 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f9ec586b-1b51-4999-ae5e-5061c2f397c8","Type":"ContainerDied","Data":"126b55fdb7fb75a0ebdd7893fa57be53d2245e8b23f51d599af20132963d6e7a"}
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.166097 4929 scope.go:117] "RemoveContainer" containerID="9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.203691 4929 scope.go:117] "RemoveContainer" containerID="caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.205863 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9ec586b-1b51-4999-ae5e-5061c2f397c8-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.230531 4929 scope.go:117] "RemoveContainer" containerID="bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.230670 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.241160 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.255956 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:48:27 crc kubenswrapper[4929]: E1122 07:48:27.256521 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerName="ceilometer-central-agent"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.256539 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerName="ceilometer-central-agent"
Nov 22 07:48:27 crc kubenswrapper[4929]: E1122 07:48:27.256562 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerName="ceilometer-notification-agent"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.256569 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerName="ceilometer-notification-agent"
Nov 22 07:48:27 crc kubenswrapper[4929]: E1122 07:48:27.256599 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerName="sg-core"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.256608 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerName="sg-core"
Nov 22 07:48:27 crc kubenswrapper[4929]: E1122 07:48:27.256634 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerName="proxy-httpd"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.256642 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerName="proxy-httpd"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.256834 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerName="ceilometer-central-agent"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.256862 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerName="sg-core"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.256885 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerName="proxy-httpd"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.256902 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" containerName="ceilometer-notification-agent"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.258859 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.265476 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.265805 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.265936 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.274062 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.276551 4929 scope.go:117] "RemoveContainer" containerID="b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.339804 4929 scope.go:117] "RemoveContainer" containerID="9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80"
Nov 22 07:48:27 crc kubenswrapper[4929]: E1122 07:48:27.340316 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80\": container with ID starting with 9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80 not found: ID does not exist" containerID="9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80"
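[Editor's note] The "RemoveStaleState" / "Deleted CPUSet assignment" entries above show the CPU and memory managers dropping per-container resource state for a pod UID that is no longer active before the replacement pod is admitted. A minimal Go sketch of that bookkeeping, as an illustrative model only (key, assignments and removeStaleState are invented names):

package main

import "fmt"

type key struct{ podUID, container string }

// removeStaleState deletes assignments whose pod UID is no longer active.
func removeStaleState(assignments map[key][]int, activePods map[string]bool) {
	for k := range assignments {
		if !activePods[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container %q of pod %s\n", k.container, k.podUID)
			delete(assignments, k) // "Deleted CPUSet assignment"
		}
	}
}

func main() {
	assignments := map[key][]int{
		{"f9ec586b", "ceilometer-central-agent"}: {2, 3},
		{"f9ec586b", "sg-core"}:                  {4},
	}
	removeStaleState(assignments, map[string]bool{}) // f9ec586b no longer active
	fmt.Println("remaining assignments:", len(assignments))
}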
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.340398 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80"} err="failed to get container status \"9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80\": rpc error: code = NotFound desc = could not find container \"9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80\": container with ID starting with 9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80 not found: ID does not exist"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.340442 4929 scope.go:117] "RemoveContainer" containerID="caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755"
Nov 22 07:48:27 crc kubenswrapper[4929]: E1122 07:48:27.340830 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755\": container with ID starting with caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755 not found: ID does not exist" containerID="caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.340857 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755"} err="failed to get container status \"caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755\": rpc error: code = NotFound desc = could not find container \"caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755\": container with ID starting with caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755 not found: ID does not exist"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.340879 4929 scope.go:117] "RemoveContainer" containerID="bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee"
Nov 22 07:48:27 crc kubenswrapper[4929]: E1122 07:48:27.341247 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee\": container with ID starting with bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee not found: ID does not exist" containerID="bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.341281 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee"} err="failed to get container status \"bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee\": rpc error: code = NotFound desc = could not find container \"bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee\": container with ID starting with bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee not found: ID does not exist"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.341311 4929 scope.go:117] "RemoveContainer" containerID="b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c"
Nov 22 07:48:27 crc kubenswrapper[4929]: E1122 07:48:27.341547 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c\": container with ID starting with b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c not found: ID does not exist" containerID="b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.341577 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c"} err="failed to get container status \"b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c\": rpc error: code = NotFound desc = could not find container \"b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c\": container with ID starting with b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c not found: ID does not exist"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.341596 4929 scope.go:117] "RemoveContainer" containerID="9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.341854 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80"} err="failed to get container status \"9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80\": rpc error: code = NotFound desc = could not find container \"9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80\": container with ID starting with 9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80 not found: ID does not exist"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.341879 4929 scope.go:117] "RemoveContainer" containerID="caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.342139 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755"} err="failed to get container status \"caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755\": rpc error: code = NotFound desc = could not find container \"caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755\": container with ID starting with caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755 not found: ID does not exist"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.342161 4929 scope.go:117] "RemoveContainer" containerID="bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.342386 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee"} err="failed to get container status \"bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee\": rpc error: code = NotFound desc = could not find container \"bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee\": container with ID starting with bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee not found: ID does not exist"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.342426 4929 scope.go:117] "RemoveContainer" containerID="b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.342617 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c"} err="failed to get container status \"b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c\": rpc error: code = NotFound desc = could not find container \"b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c\": container with ID starting with b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c not found: ID does not exist"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.342655 4929 scope.go:117] "RemoveContainer" containerID="9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.342894 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80"} err="failed to get container status \"9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80\": rpc error: code = NotFound desc = could not find container \"9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80\": container with ID starting with 9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80 not found: ID does not exist"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.342944 4929 scope.go:117] "RemoveContainer" containerID="caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.343196 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755"} err="failed to get container status \"caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755\": rpc error: code = NotFound desc = could not find container \"caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755\": container with ID starting with caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755 not found: ID does not exist"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.343237 4929 scope.go:117] "RemoveContainer" containerID="bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.343456 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee"} err="failed to get container status \"bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee\": rpc error: code = NotFound desc = could not find container \"bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee\": container with ID starting with bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee not found: ID does not exist"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.343478 4929 scope.go:117] "RemoveContainer" containerID="b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.343697 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c"} err="failed to get container status \"b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c\": rpc error: code = NotFound desc = could not find container \"b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c\": container with ID starting with b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c not found: ID does not exist"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.343727 4929 scope.go:117] "RemoveContainer" containerID="9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80"
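[Editor's note] The repeated "DeleteContainer returned error ... NotFound" entries above are benign: deletion first asks the runtime for container status, and a NotFound answer means the container is already gone, so the delete is effectively idempotent. A minimal Go sketch of that pattern, as an illustrative model only (containerStatus and deleteContainer are invented names, not the CRI API):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("rpc error: code = NotFound")

// containerStatus models a status query for a container already removed.
func containerStatus(id string) error { return errNotFound }

// deleteContainer treats NotFound as "nothing left to delete".
func deleteContainer(id string) {
	if err := containerStatus(id); err != nil {
		fmt.Printf("DeleteContainer returned error for %s: %v\n", id, err)
		return // logged, but the desired state (gone) already holds
	}
	// ... real removal would happen here ...
}

func main() {
	for _, id := range []string{"9f7a0b46", "caf468dc", "bd155d76", "b64e3485"} {
		deleteContainer(id)
	}
}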
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.343943 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80"} err="failed to get container status \"9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80\": rpc error: code = NotFound desc = could not find container \"9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80\": container with ID starting with 9f7a0b46e5f4b2a0d6795df1cee830726c654dc2b9e4ec4d1c225a12e76abd80 not found: ID does not exist"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.343965 4929 scope.go:117] "RemoveContainer" containerID="caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.344198 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755"} err="failed to get container status \"caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755\": rpc error: code = NotFound desc = could not find container \"caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755\": container with ID starting with caf468dceea72374318fbfd46e5bab21b4b2d8ce8cb6dd5dd722e1199caed755 not found: ID does not exist"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.344235 4929 scope.go:117] "RemoveContainer" containerID="bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.344525 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee"} err="failed to get container status \"bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee\": rpc error: code = NotFound desc = could not find container \"bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee\": container with ID starting with bd155d76754ab7f65806ff787f1ae43a922f1663640988065ef8c54ca289c9ee not found: ID does not exist"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.344559 4929 scope.go:117] "RemoveContainer" containerID="b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.344808 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c"} err="failed to get container status \"b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c\": rpc error: code = NotFound desc = could not find container \"b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c\": container with ID starting with b64e348596a5ddb2aab9cd7986925fd46663ea8186b7c59f8f7eeab970d06d7c not found: ID does not exist"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.412443 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/329c9bc2-50a1-427a-98b7-0f090f6b96a4-log-httpd\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.412621 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/329c9bc2-50a1-427a-98b7-0f090f6b96a4-run-httpd\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.412703 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssbzk\" (UniqueName: \"kubernetes.io/projected/329c9bc2-50a1-427a-98b7-0f090f6b96a4-kube-api-access-ssbzk\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.412834 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.413019 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-scripts\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.413095 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-config-data\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.413409 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.413513 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.515269 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/329c9bc2-50a1-427a-98b7-0f090f6b96a4-run-httpd\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.515358 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssbzk\" (UniqueName: \"kubernetes.io/projected/329c9bc2-50a1-427a-98b7-0f090f6b96a4-kube-api-access-ssbzk\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.515398 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.515427 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-scripts\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.515446 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-config-data\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.515485 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.515516 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.515573 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/329c9bc2-50a1-427a-98b7-0f090f6b96a4-log-httpd\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.515752 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/329c9bc2-50a1-427a-98b7-0f090f6b96a4-run-httpd\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.515939 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/329c9bc2-50a1-427a-98b7-0f090f6b96a4-log-httpd\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.519175 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-scripts\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.519688 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.519821 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-config-data\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.519921 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0"
pod="openstack/ceilometer-0" Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.534052 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssbzk\" (UniqueName: \"kubernetes.io/projected/329c9bc2-50a1-427a-98b7-0f090f6b96a4-kube-api-access-ssbzk\") pod \"ceilometer-0\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") " pod="openstack/ceilometer-0" Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.638173 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:48:27 crc kubenswrapper[4929]: I1122 07:48:27.964005 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9ec586b-1b51-4999-ae5e-5061c2f397c8" path="/var/lib/kubelet/pods/f9ec586b-1b51-4999-ae5e-5061c2f397c8/volumes" Nov 22 07:48:28 crc kubenswrapper[4929]: I1122 07:48:28.157701 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:48:28 crc kubenswrapper[4929]: W1122 07:48:28.161321 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod329c9bc2_50a1_427a_98b7_0f090f6b96a4.slice/crio-4d4b12d017bc72e78eb9a6ed927010faeb885ae57935ee4fd86274608a426175 WatchSource:0}: Error finding container 4d4b12d017bc72e78eb9a6ed927010faeb885ae57935ee4fd86274608a426175: Status 404 returned error can't find the container with id 4d4b12d017bc72e78eb9a6ed927010faeb885ae57935ee4fd86274608a426175 Nov 22 07:48:28 crc kubenswrapper[4929]: I1122 07:48:28.178712 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"329c9bc2-50a1-427a-98b7-0f090f6b96a4","Type":"ContainerStarted","Data":"4d4b12d017bc72e78eb9a6ed927010faeb885ae57935ee4fd86274608a426175"} Nov 22 07:48:31 crc kubenswrapper[4929]: I1122 07:48:31.212193 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"329c9bc2-50a1-427a-98b7-0f090f6b96a4","Type":"ContainerStarted","Data":"9901daf13cede55041f32ea5d8ac658df95c2d62a0cdd7628ac29cb0989ddd0e"} Nov 22 07:48:32 crc kubenswrapper[4929]: I1122 07:48:32.229143 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"329c9bc2-50a1-427a-98b7-0f090f6b96a4","Type":"ContainerStarted","Data":"1631db5cbb51990c077041a41225842f9331868e7422339568c865c72f4ce0a6"} Nov 22 07:48:33 crc kubenswrapper[4929]: I1122 07:48:33.244090 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"329c9bc2-50a1-427a-98b7-0f090f6b96a4","Type":"ContainerStarted","Data":"8448c21157ce28870a3e3794f48d4bbd7558123e88575a3c9e18d35501314e02"} Nov 22 07:48:34 crc kubenswrapper[4929]: I1122 07:48:34.271881 4929 generic.go:334] "Generic (PLEG): container finished" podID="7e5572bd-b4bf-4476-9247-06d7c892dcf1" containerID="fdbc2fa7daba9dc24b636806a887c51ccd87efa8c07ad9d0eef10f7bb914a961" exitCode=0 Nov 22 07:48:34 crc kubenswrapper[4929]: I1122 07:48:34.273256 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-9cvcv" event={"ID":"7e5572bd-b4bf-4476-9247-06d7c892dcf1","Type":"ContainerDied","Data":"fdbc2fa7daba9dc24b636806a887c51ccd87efa8c07ad9d0eef10f7bb914a961"} Nov 22 07:48:35 crc kubenswrapper[4929]: I1122 07:48:35.287058 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"329c9bc2-50a1-427a-98b7-0f090f6b96a4","Type":"ContainerStarted","Data":"2f7c9c608c81af699c1c03f4230ff74014aa0dd9095d58c7297dacc2d33f0fcd"} Nov 22 07:48:35 crc kubenswrapper[4929]: I1122 07:48:35.287625 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 07:48:35 crc kubenswrapper[4929]: I1122 07:48:35.320395 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.2427876700000002 podStartE2EDuration="8.320312498s" podCreationTimestamp="2025-11-22 07:48:27 +0000 UTC" firstStartedPulling="2025-11-22 07:48:28.163935953 +0000 UTC m=+2245.273389986" lastFinishedPulling="2025-11-22 07:48:34.241460761 +0000 UTC m=+2251.350914814" observedRunningTime="2025-11-22 07:48:35.310106495 +0000 UTC m=+2252.419560518" watchObservedRunningTime="2025-11-22 07:48:35.320312498 +0000 UTC m=+2252.429766511" Nov 22 07:48:35 crc kubenswrapper[4929]: I1122 07:48:35.628894 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-9cvcv" Nov 22 07:48:35 crc kubenswrapper[4929]: I1122 07:48:35.671860 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7e5572bd-b4bf-4476-9247-06d7c892dcf1-db-sync-config-data\") pod \"7e5572bd-b4bf-4476-9247-06d7c892dcf1\" (UID: \"7e5572bd-b4bf-4476-9247-06d7c892dcf1\") " Nov 22 07:48:35 crc kubenswrapper[4929]: I1122 07:48:35.672477 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e5572bd-b4bf-4476-9247-06d7c892dcf1-combined-ca-bundle\") pod \"7e5572bd-b4bf-4476-9247-06d7c892dcf1\" (UID: \"7e5572bd-b4bf-4476-9247-06d7c892dcf1\") " Nov 22 07:48:35 crc kubenswrapper[4929]: I1122 07:48:35.672855 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xjgpl\" (UniqueName: \"kubernetes.io/projected/7e5572bd-b4bf-4476-9247-06d7c892dcf1-kube-api-access-xjgpl\") pod \"7e5572bd-b4bf-4476-9247-06d7c892dcf1\" (UID: \"7e5572bd-b4bf-4476-9247-06d7c892dcf1\") " Nov 22 07:48:35 crc kubenswrapper[4929]: I1122 07:48:35.678766 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e5572bd-b4bf-4476-9247-06d7c892dcf1-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "7e5572bd-b4bf-4476-9247-06d7c892dcf1" (UID: "7e5572bd-b4bf-4476-9247-06d7c892dcf1"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:35 crc kubenswrapper[4929]: I1122 07:48:35.678864 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e5572bd-b4bf-4476-9247-06d7c892dcf1-kube-api-access-xjgpl" (OuterVolumeSpecName: "kube-api-access-xjgpl") pod "7e5572bd-b4bf-4476-9247-06d7c892dcf1" (UID: "7e5572bd-b4bf-4476-9247-06d7c892dcf1"). InnerVolumeSpecName "kube-api-access-xjgpl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:48:35 crc kubenswrapper[4929]: I1122 07:48:35.701348 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e5572bd-b4bf-4476-9247-06d7c892dcf1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7e5572bd-b4bf-4476-9247-06d7c892dcf1" (UID: "7e5572bd-b4bf-4476-9247-06d7c892dcf1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:35 crc kubenswrapper[4929]: I1122 07:48:35.775499 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xjgpl\" (UniqueName: \"kubernetes.io/projected/7e5572bd-b4bf-4476-9247-06d7c892dcf1-kube-api-access-xjgpl\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:35 crc kubenswrapper[4929]: I1122 07:48:35.775551 4929 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7e5572bd-b4bf-4476-9247-06d7c892dcf1-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:35 crc kubenswrapper[4929]: I1122 07:48:35.775562 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e5572bd-b4bf-4476-9247-06d7c892dcf1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.296378 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-9cvcv" event={"ID":"7e5572bd-b4bf-4476-9247-06d7c892dcf1","Type":"ContainerDied","Data":"4a4cf227f45b5362beabfc9cd08fc878d00eeb7c24e292f81338ef4b54f02562"} Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.296724 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4a4cf227f45b5362beabfc9cd08fc878d00eeb7c24e292f81338ef4b54f02562" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.296409 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-9cvcv" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.560070 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-6dc6c45b7d-zjztr"] Nov 22 07:48:36 crc kubenswrapper[4929]: E1122 07:48:36.562694 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e5572bd-b4bf-4476-9247-06d7c892dcf1" containerName="barbican-db-sync" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.562732 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e5572bd-b4bf-4476-9247-06d7c892dcf1" containerName="barbican-db-sync" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.563017 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e5572bd-b4bf-4476-9247-06d7c892dcf1" containerName="barbican-db-sync" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.564363 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.568233 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.568361 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.568485 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-z6srv" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.594962 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cb931a9c-bada-431f-aaae-d3c1603b8d37-logs\") pod \"barbican-keystone-listener-6dc6c45b7d-zjztr\" (UID: \"cb931a9c-bada-431f-aaae-d3c1603b8d37\") " pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.595033 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cb931a9c-bada-431f-aaae-d3c1603b8d37-config-data-custom\") pod \"barbican-keystone-listener-6dc6c45b7d-zjztr\" (UID: \"cb931a9c-bada-431f-aaae-d3c1603b8d37\") " pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.595065 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb931a9c-bada-431f-aaae-d3c1603b8d37-combined-ca-bundle\") pod \"barbican-keystone-listener-6dc6c45b7d-zjztr\" (UID: \"cb931a9c-bada-431f-aaae-d3c1603b8d37\") " pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.595145 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb931a9c-bada-431f-aaae-d3c1603b8d37-config-data\") pod \"barbican-keystone-listener-6dc6c45b7d-zjztr\" (UID: \"cb931a9c-bada-431f-aaae-d3c1603b8d37\") " pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.595181 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94tcq\" (UniqueName: \"kubernetes.io/projected/cb931a9c-bada-431f-aaae-d3c1603b8d37-kube-api-access-94tcq\") pod \"barbican-keystone-listener-6dc6c45b7d-zjztr\" (UID: \"cb931a9c-bada-431f-aaae-d3c1603b8d37\") " pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.598109 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-865648cb87-24j78"] Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.600070 4929 util.go:30] "No sandbox for pod can be found. 
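[Editor's note] The "SyncLoop ADD / UPDATE / DELETE / REMOVE" entries throughout this section are the kubelet's main loop reacting to pod updates from the API. A minimal Go sketch of a channel-driven dispatch of that shape, as an illustrative model only (op, podUpdate and syncLoop are invented names, not kubelet types):

package main

import "fmt"

type op int

const (
	ADD op = iota
	UPDATE
	DELETE
	REMOVE
)

type podUpdate struct {
	op   op
	pods []string
}

// syncLoop routes each batch of pod updates to the matching handler.
func syncLoop(ch <-chan podUpdate) {
	for u := range ch {
		switch u.op {
		case ADD:
			fmt.Println("SyncLoop ADD", u.pods)
		case UPDATE:
			fmt.Println("SyncLoop UPDATE", u.pods)
		case DELETE:
			fmt.Println("SyncLoop DELETE", u.pods) // graceful deletion requested
		case REMOVE:
			fmt.Println("SyncLoop REMOVE", u.pods) // object gone from the API
		}
	}
}

func main() {
	ch := make(chan podUpdate, 2)
	ch <- podUpdate{ADD, []string{"openstack/barbican-worker-865648cb87-24j78"}}
	ch <- podUpdate{UPDATE, []string{"openstack/barbican-worker-865648cb87-24j78"}}
	close(ch)
	syncLoop(ch)
}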
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.600070 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-865648cb87-24j78"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.602466 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.617386 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6dc6c45b7d-zjztr"]
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.634277 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-865648cb87-24j78"]
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.690928 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-58957f86ff-4pswn"]
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.692972 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58957f86ff-4pswn"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.697321 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8ba5625d-6646-4b1c-aace-040845557c79-config-data-custom\") pod \"barbican-worker-865648cb87-24j78\" (UID: \"8ba5625d-6646-4b1c-aace-040845557c79\") " pod="openstack/barbican-worker-865648cb87-24j78"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.697360 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94tcq\" (UniqueName: \"kubernetes.io/projected/cb931a9c-bada-431f-aaae-d3c1603b8d37-kube-api-access-94tcq\") pod \"barbican-keystone-listener-6dc6c45b7d-zjztr\" (UID: \"cb931a9c-bada-431f-aaae-d3c1603b8d37\") " pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.697387 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dl4f\" (UniqueName: \"kubernetes.io/projected/8ba5625d-6646-4b1c-aace-040845557c79-kube-api-access-2dl4f\") pod \"barbican-worker-865648cb87-24j78\" (UID: \"8ba5625d-6646-4b1c-aace-040845557c79\") " pod="openstack/barbican-worker-865648cb87-24j78"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.697436 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ba5625d-6646-4b1c-aace-040845557c79-config-data\") pod \"barbican-worker-865648cb87-24j78\" (UID: \"8ba5625d-6646-4b1c-aace-040845557c79\") " pod="openstack/barbican-worker-865648cb87-24j78"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.697467 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cb931a9c-bada-431f-aaae-d3c1603b8d37-logs\") pod \"barbican-keystone-listener-6dc6c45b7d-zjztr\" (UID: \"cb931a9c-bada-431f-aaae-d3c1603b8d37\") " pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.697505 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ba5625d-6646-4b1c-aace-040845557c79-combined-ca-bundle\") pod \"barbican-worker-865648cb87-24j78\" (UID: \"8ba5625d-6646-4b1c-aace-040845557c79\") " pod="openstack/barbican-worker-865648cb87-24j78"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.697523 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cb931a9c-bada-431f-aaae-d3c1603b8d37-config-data-custom\") pod \"barbican-keystone-listener-6dc6c45b7d-zjztr\" (UID: \"cb931a9c-bada-431f-aaae-d3c1603b8d37\") " pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.697544 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb931a9c-bada-431f-aaae-d3c1603b8d37-combined-ca-bundle\") pod \"barbican-keystone-listener-6dc6c45b7d-zjztr\" (UID: \"cb931a9c-bada-431f-aaae-d3c1603b8d37\") " pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.697576 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ba5625d-6646-4b1c-aace-040845557c79-logs\") pod \"barbican-worker-865648cb87-24j78\" (UID: \"8ba5625d-6646-4b1c-aace-040845557c79\") " pod="openstack/barbican-worker-865648cb87-24j78"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.697631 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb931a9c-bada-431f-aaae-d3c1603b8d37-config-data\") pod \"barbican-keystone-listener-6dc6c45b7d-zjztr\" (UID: \"cb931a9c-bada-431f-aaae-d3c1603b8d37\") " pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.700783 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cb931a9c-bada-431f-aaae-d3c1603b8d37-logs\") pod \"barbican-keystone-listener-6dc6c45b7d-zjztr\" (UID: \"cb931a9c-bada-431f-aaae-d3c1603b8d37\") " pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.703526 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58957f86ff-4pswn"]
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.705999 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb931a9c-bada-431f-aaae-d3c1603b8d37-combined-ca-bundle\") pod \"barbican-keystone-listener-6dc6c45b7d-zjztr\" (UID: \"cb931a9c-bada-431f-aaae-d3c1603b8d37\") " pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.711969 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cb931a9c-bada-431f-aaae-d3c1603b8d37-config-data-custom\") pod \"barbican-keystone-listener-6dc6c45b7d-zjztr\" (UID: \"cb931a9c-bada-431f-aaae-d3c1603b8d37\") " pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.726150 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb931a9c-bada-431f-aaae-d3c1603b8d37-config-data\") pod \"barbican-keystone-listener-6dc6c45b7d-zjztr\" (UID: \"cb931a9c-bada-431f-aaae-d3c1603b8d37\") " pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.735799 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-94tcq\" (UniqueName: \"kubernetes.io/projected/cb931a9c-bada-431f-aaae-d3c1603b8d37-kube-api-access-94tcq\") pod \"barbican-keystone-listener-6dc6c45b7d-zjztr\" (UID: \"cb931a9c-bada-431f-aaae-d3c1603b8d37\") " pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.800288 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8ba5625d-6646-4b1c-aace-040845557c79-config-data-custom\") pod \"barbican-worker-865648cb87-24j78\" (UID: \"8ba5625d-6646-4b1c-aace-040845557c79\") " pod="openstack/barbican-worker-865648cb87-24j78"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.800337 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dl4f\" (UniqueName: \"kubernetes.io/projected/8ba5625d-6646-4b1c-aace-040845557c79-kube-api-access-2dl4f\") pod \"barbican-worker-865648cb87-24j78\" (UID: \"8ba5625d-6646-4b1c-aace-040845557c79\") " pod="openstack/barbican-worker-865648cb87-24j78"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.800373 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-config\") pod \"dnsmasq-dns-58957f86ff-4pswn\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") " pod="openstack/dnsmasq-dns-58957f86ff-4pswn"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.800395 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xc4s7\" (UniqueName: \"kubernetes.io/projected/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-kube-api-access-xc4s7\") pod \"dnsmasq-dns-58957f86ff-4pswn\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") " pod="openstack/dnsmasq-dns-58957f86ff-4pswn"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.800440 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-ovsdbserver-sb\") pod \"dnsmasq-dns-58957f86ff-4pswn\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") " pod="openstack/dnsmasq-dns-58957f86ff-4pswn"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.800462 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-ovsdbserver-nb\") pod \"dnsmasq-dns-58957f86ff-4pswn\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") " pod="openstack/dnsmasq-dns-58957f86ff-4pswn"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.800487 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ba5625d-6646-4b1c-aace-040845557c79-config-data\") pod \"barbican-worker-865648cb87-24j78\" (UID: \"8ba5625d-6646-4b1c-aace-040845557c79\") " pod="openstack/barbican-worker-865648cb87-24j78"
Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.800507 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-dns-swift-storage-0\") pod \"dnsmasq-dns-58957f86ff-4pswn\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") " pod="openstack/dnsmasq-dns-58957f86ff-4pswn"
07:48:36.801473 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ba5625d-6646-4b1c-aace-040845557c79-combined-ca-bundle\") pod \"barbican-worker-865648cb87-24j78\" (UID: \"8ba5625d-6646-4b1c-aace-040845557c79\") " pod="openstack/barbican-worker-865648cb87-24j78" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.801618 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ba5625d-6646-4b1c-aace-040845557c79-logs\") pod \"barbican-worker-865648cb87-24j78\" (UID: \"8ba5625d-6646-4b1c-aace-040845557c79\") " pod="openstack/barbican-worker-865648cb87-24j78" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.801691 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-dns-svc\") pod \"dnsmasq-dns-58957f86ff-4pswn\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") " pod="openstack/dnsmasq-dns-58957f86ff-4pswn" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.811077 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8ba5625d-6646-4b1c-aace-040845557c79-logs\") pod \"barbican-worker-865648cb87-24j78\" (UID: \"8ba5625d-6646-4b1c-aace-040845557c79\") " pod="openstack/barbican-worker-865648cb87-24j78" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.811345 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8ba5625d-6646-4b1c-aace-040845557c79-config-data-custom\") pod \"barbican-worker-865648cb87-24j78\" (UID: \"8ba5625d-6646-4b1c-aace-040845557c79\") " pod="openstack/barbican-worker-865648cb87-24j78" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.811626 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8ba5625d-6646-4b1c-aace-040845557c79-config-data\") pod \"barbican-worker-865648cb87-24j78\" (UID: \"8ba5625d-6646-4b1c-aace-040845557c79\") " pod="openstack/barbican-worker-865648cb87-24j78" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.818675 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-68cf69f54-cvmvj"] Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.820914 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ba5625d-6646-4b1c-aace-040845557c79-combined-ca-bundle\") pod \"barbican-worker-865648cb87-24j78\" (UID: \"8ba5625d-6646-4b1c-aace-040845557c79\") " pod="openstack/barbican-worker-865648cb87-24j78" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.824802 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dl4f\" (UniqueName: \"kubernetes.io/projected/8ba5625d-6646-4b1c-aace-040845557c79-kube-api-access-2dl4f\") pod \"barbican-worker-865648cb87-24j78\" (UID: \"8ba5625d-6646-4b1c-aace-040845557c79\") " pod="openstack/barbican-worker-865648cb87-24j78" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.829256 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.831794 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.855345 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-68cf69f54-cvmvj"] Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.892416 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.903451 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-dns-svc\") pod \"dnsmasq-dns-58957f86ff-4pswn\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") " pod="openstack/dnsmasq-dns-58957f86ff-4pswn" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.903555 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/494b6c5c-2251-443b-bd47-fa903a139c47-logs\") pod \"barbican-api-68cf69f54-cvmvj\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.903599 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-config\") pod \"dnsmasq-dns-58957f86ff-4pswn\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") " pod="openstack/dnsmasq-dns-58957f86ff-4pswn" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.903622 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xc4s7\" (UniqueName: \"kubernetes.io/projected/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-kube-api-access-xc4s7\") pod \"dnsmasq-dns-58957f86ff-4pswn\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") " pod="openstack/dnsmasq-dns-58957f86ff-4pswn" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.903679 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-ovsdbserver-sb\") pod \"dnsmasq-dns-58957f86ff-4pswn\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") " pod="openstack/dnsmasq-dns-58957f86ff-4pswn" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.903703 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-ovsdbserver-nb\") pod \"dnsmasq-dns-58957f86ff-4pswn\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") " pod="openstack/dnsmasq-dns-58957f86ff-4pswn" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.903723 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/494b6c5c-2251-443b-bd47-fa903a139c47-config-data-custom\") pod \"barbican-api-68cf69f54-cvmvj\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.903752 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/494b6c5c-2251-443b-bd47-fa903a139c47-config-data\") pod \"barbican-api-68cf69f54-cvmvj\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.903768 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klkdf\" (UniqueName: \"kubernetes.io/projected/494b6c5c-2251-443b-bd47-fa903a139c47-kube-api-access-klkdf\") pod \"barbican-api-68cf69f54-cvmvj\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.903787 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-dns-swift-storage-0\") pod \"dnsmasq-dns-58957f86ff-4pswn\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") " pod="openstack/dnsmasq-dns-58957f86ff-4pswn" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.903808 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/494b6c5c-2251-443b-bd47-fa903a139c47-combined-ca-bundle\") pod \"barbican-api-68cf69f54-cvmvj\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.904608 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-config\") pod \"dnsmasq-dns-58957f86ff-4pswn\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") " pod="openstack/dnsmasq-dns-58957f86ff-4pswn" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.904689 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-ovsdbserver-sb\") pod \"dnsmasq-dns-58957f86ff-4pswn\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") " pod="openstack/dnsmasq-dns-58957f86ff-4pswn" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.904712 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-ovsdbserver-nb\") pod \"dnsmasq-dns-58957f86ff-4pswn\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") " pod="openstack/dnsmasq-dns-58957f86ff-4pswn" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.905102 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-dns-swift-storage-0\") pod \"dnsmasq-dns-58957f86ff-4pswn\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") " pod="openstack/dnsmasq-dns-58957f86ff-4pswn" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.906905 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-dns-svc\") pod \"dnsmasq-dns-58957f86ff-4pswn\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") " pod="openstack/dnsmasq-dns-58957f86ff-4pswn" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.927694 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-865648cb87-24j78" Nov 22 07:48:36 crc kubenswrapper[4929]: I1122 07:48:36.928593 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xc4s7\" (UniqueName: \"kubernetes.io/projected/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-kube-api-access-xc4s7\") pod \"dnsmasq-dns-58957f86ff-4pswn\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") " pod="openstack/dnsmasq-dns-58957f86ff-4pswn" Nov 22 07:48:37 crc kubenswrapper[4929]: I1122 07:48:37.006955 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/494b6c5c-2251-443b-bd47-fa903a139c47-logs\") pod \"barbican-api-68cf69f54-cvmvj\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:37 crc kubenswrapper[4929]: I1122 07:48:37.007121 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/494b6c5c-2251-443b-bd47-fa903a139c47-config-data-custom\") pod \"barbican-api-68cf69f54-cvmvj\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:37 crc kubenswrapper[4929]: I1122 07:48:37.007165 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/494b6c5c-2251-443b-bd47-fa903a139c47-config-data\") pod \"barbican-api-68cf69f54-cvmvj\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:37 crc kubenswrapper[4929]: I1122 07:48:37.007188 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klkdf\" (UniqueName: \"kubernetes.io/projected/494b6c5c-2251-443b-bd47-fa903a139c47-kube-api-access-klkdf\") pod \"barbican-api-68cf69f54-cvmvj\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:37 crc kubenswrapper[4929]: I1122 07:48:37.007371 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/494b6c5c-2251-443b-bd47-fa903a139c47-combined-ca-bundle\") pod \"barbican-api-68cf69f54-cvmvj\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:37 crc kubenswrapper[4929]: I1122 07:48:37.010262 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/494b6c5c-2251-443b-bd47-fa903a139c47-logs\") pod \"barbican-api-68cf69f54-cvmvj\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:37 crc kubenswrapper[4929]: I1122 07:48:37.010844 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/494b6c5c-2251-443b-bd47-fa903a139c47-combined-ca-bundle\") pod \"barbican-api-68cf69f54-cvmvj\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:37 crc kubenswrapper[4929]: I1122 07:48:37.013236 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/494b6c5c-2251-443b-bd47-fa903a139c47-config-data\") pod \"barbican-api-68cf69f54-cvmvj\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:37 crc 
kubenswrapper[4929]: I1122 07:48:37.015460 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/494b6c5c-2251-443b-bd47-fa903a139c47-config-data-custom\") pod \"barbican-api-68cf69f54-cvmvj\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:37 crc kubenswrapper[4929]: I1122 07:48:37.026247 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-klkdf\" (UniqueName: \"kubernetes.io/projected/494b6c5c-2251-443b-bd47-fa903a139c47-kube-api-access-klkdf\") pod \"barbican-api-68cf69f54-cvmvj\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:37 crc kubenswrapper[4929]: I1122 07:48:37.107882 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58957f86ff-4pswn" Nov 22 07:48:37 crc kubenswrapper[4929]: I1122 07:48:37.194546 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:37 crc kubenswrapper[4929]: I1122 07:48:37.407635 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6dc6c45b7d-zjztr"] Nov 22 07:48:37 crc kubenswrapper[4929]: I1122 07:48:37.506659 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-865648cb87-24j78"] Nov 22 07:48:37 crc kubenswrapper[4929]: W1122 07:48:37.515359 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8ba5625d_6646_4b1c_aace_040845557c79.slice/crio-757a0c97a1d5771d4135e365819cfd93a9f3659bfab585db139c5756ae37d8b1 WatchSource:0}: Error finding container 757a0c97a1d5771d4135e365819cfd93a9f3659bfab585db139c5756ae37d8b1: Status 404 returned error can't find the container with id 757a0c97a1d5771d4135e365819cfd93a9f3659bfab585db139c5756ae37d8b1 Nov 22 07:48:37 crc kubenswrapper[4929]: W1122 07:48:37.610558 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod91ce7752_be0e_4efe_8ab5_91312f1e8c5a.slice/crio-e4751e26062feaeeaf04752146fc63074005c4d0083aa9c1a1a67e6398e4c483 WatchSource:0}: Error finding container e4751e26062feaeeaf04752146fc63074005c4d0083aa9c1a1a67e6398e4c483: Status 404 returned error can't find the container with id e4751e26062feaeeaf04752146fc63074005c4d0083aa9c1a1a67e6398e4c483 Nov 22 07:48:37 crc kubenswrapper[4929]: I1122 07:48:37.632751 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-58957f86ff-4pswn"] Nov 22 07:48:37 crc kubenswrapper[4929]: I1122 07:48:37.939566 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-68cf69f54-cvmvj"] Nov 22 07:48:38 crc kubenswrapper[4929]: I1122 07:48:38.330235 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr" event={"ID":"cb931a9c-bada-431f-aaae-d3c1603b8d37","Type":"ContainerStarted","Data":"768f3cfcf54539a1fb96a061f6de32fc4f7115d9533c96da40c0e7253da0c915"} Nov 22 07:48:38 crc kubenswrapper[4929]: I1122 07:48:38.332445 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68cf69f54-cvmvj" event={"ID":"494b6c5c-2251-443b-bd47-fa903a139c47","Type":"ContainerStarted","Data":"4fee07e82750c13c657fa3ec87573da70fdde66c21767b4723fde145210d80f7"} Nov 22 07:48:38 crc 
kubenswrapper[4929]: I1122 07:48:38.332483 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68cf69f54-cvmvj" event={"ID":"494b6c5c-2251-443b-bd47-fa903a139c47","Type":"ContainerStarted","Data":"6a5deacd589a08f25ac012105a2e612edb3d0d829951db3182a732079ee6aa3c"} Nov 22 07:48:38 crc kubenswrapper[4929]: I1122 07:48:38.334290 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-865648cb87-24j78" event={"ID":"8ba5625d-6646-4b1c-aace-040845557c79","Type":"ContainerStarted","Data":"757a0c97a1d5771d4135e365819cfd93a9f3659bfab585db139c5756ae37d8b1"} Nov 22 07:48:38 crc kubenswrapper[4929]: I1122 07:48:38.337107 4929 generic.go:334] "Generic (PLEG): container finished" podID="91ce7752-be0e-4efe-8ab5-91312f1e8c5a" containerID="548ed429b9974bd0174fbd541ede91b8da048d8c1b7e74429141a9c2c8113f73" exitCode=0 Nov 22 07:48:38 crc kubenswrapper[4929]: I1122 07:48:38.337180 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58957f86ff-4pswn" event={"ID":"91ce7752-be0e-4efe-8ab5-91312f1e8c5a","Type":"ContainerDied","Data":"548ed429b9974bd0174fbd541ede91b8da048d8c1b7e74429141a9c2c8113f73"} Nov 22 07:48:38 crc kubenswrapper[4929]: I1122 07:48:38.337341 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58957f86ff-4pswn" event={"ID":"91ce7752-be0e-4efe-8ab5-91312f1e8c5a","Type":"ContainerStarted","Data":"e4751e26062feaeeaf04752146fc63074005c4d0083aa9c1a1a67e6398e4c483"} Nov 22 07:48:38 crc kubenswrapper[4929]: I1122 07:48:38.345392 4929 generic.go:334] "Generic (PLEG): container finished" podID="4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c" containerID="5cc0a46aa7a491ff2d9f413a47650e35c1e27c38bbf7da737dba0db4e69791b5" exitCode=0 Nov 22 07:48:38 crc kubenswrapper[4929]: I1122 07:48:38.345429 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-r6lzk" event={"ID":"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c","Type":"ContainerDied","Data":"5cc0a46aa7a491ff2d9f413a47650e35c1e27c38bbf7da737dba0db4e69791b5"} Nov 22 07:48:39 crc kubenswrapper[4929]: I1122 07:48:39.362229 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68cf69f54-cvmvj" event={"ID":"494b6c5c-2251-443b-bd47-fa903a139c47","Type":"ContainerStarted","Data":"2620af1a3ffa6ae9353c4a43eb0f1a0fbd7de998beb3bacffc68a0dbeb67c02a"} Nov 22 07:48:39 crc kubenswrapper[4929]: I1122 07:48:39.363047 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:39 crc kubenswrapper[4929]: I1122 07:48:39.363150 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:39 crc kubenswrapper[4929]: I1122 07:48:39.371619 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58957f86ff-4pswn" event={"ID":"91ce7752-be0e-4efe-8ab5-91312f1e8c5a","Type":"ContainerStarted","Data":"b95a78816fd7a5bde0bdc2127ffccd44cd070ccea0d386bac84c73bcb28bc802"} Nov 22 07:48:39 crc kubenswrapper[4929]: I1122 07:48:39.385824 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-68cf69f54-cvmvj" podStartSLOduration=3.385809287 podStartE2EDuration="3.385809287s" podCreationTimestamp="2025-11-22 07:48:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:48:39.380627948 +0000 UTC m=+2256.490081961" 
watchObservedRunningTime="2025-11-22 07:48:39.385809287 +0000 UTC m=+2256.495263300" Nov 22 07:48:39 crc kubenswrapper[4929]: I1122 07:48:39.411735 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-58957f86ff-4pswn" podStartSLOduration=3.41171771 podStartE2EDuration="3.41171771s" podCreationTimestamp="2025-11-22 07:48:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:48:39.41051527 +0000 UTC m=+2256.519969393" watchObservedRunningTime="2025-11-22 07:48:39.41171771 +0000 UTC m=+2256.521171733" Nov 22 07:48:39 crc kubenswrapper[4929]: I1122 07:48:39.911961 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-r6lzk" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.004258 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7486bd8978-4r6hg"] Nov 22 07:48:40 crc kubenswrapper[4929]: E1122 07:48:40.004706 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c" containerName="cinder-db-sync" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.004725 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c" containerName="cinder-db-sync" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.004896 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c" containerName="cinder-db-sync" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.005826 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7486bd8978-4r6hg"] Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.005908 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.008040 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.008507 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.070175 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-etc-machine-id\") pod \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.070332 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dc4mg\" (UniqueName: \"kubernetes.io/projected/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-kube-api-access-dc4mg\") pod \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.070341 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c" (UID: "4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.070557 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-db-sync-config-data\") pod \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.070614 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-config-data\") pod \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.070648 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-scripts\") pod \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.070681 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-combined-ca-bundle\") pod \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\" (UID: \"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c\") " Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.071130 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/edd001b8-6115-447e-bf2e-89d0a843f681-internal-tls-certs\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.071321 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/edd001b8-6115-447e-bf2e-89d0a843f681-config-data\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.080590 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/edd001b8-6115-447e-bf2e-89d0a843f681-config-data-custom\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.080722 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/edd001b8-6115-447e-bf2e-89d0a843f681-combined-ca-bundle\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.080788 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzg6v\" (UniqueName: \"kubernetes.io/projected/edd001b8-6115-447e-bf2e-89d0a843f681-kube-api-access-bzg6v\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc 
kubenswrapper[4929]: I1122 07:48:40.081873 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-kube-api-access-dc4mg" (OuterVolumeSpecName: "kube-api-access-dc4mg") pod "4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c" (UID: "4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c"). InnerVolumeSpecName "kube-api-access-dc4mg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.083417 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c" (UID: "4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.082121 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/edd001b8-6115-447e-bf2e-89d0a843f681-public-tls-certs\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.084204 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/edd001b8-6115-447e-bf2e-89d0a843f681-logs\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.084348 4929 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.084366 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dc4mg\" (UniqueName: \"kubernetes.io/projected/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-kube-api-access-dc4mg\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.084379 4929 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.087655 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-scripts" (OuterVolumeSpecName: "scripts") pod "4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c" (UID: "4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.109932 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c" (UID: "4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.152453 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-config-data" (OuterVolumeSpecName: "config-data") pod "4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c" (UID: "4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.185608 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/edd001b8-6115-447e-bf2e-89d0a843f681-internal-tls-certs\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.185704 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/edd001b8-6115-447e-bf2e-89d0a843f681-config-data\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.185745 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/edd001b8-6115-447e-bf2e-89d0a843f681-config-data-custom\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.185775 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/edd001b8-6115-447e-bf2e-89d0a843f681-combined-ca-bundle\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.185805 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzg6v\" (UniqueName: \"kubernetes.io/projected/edd001b8-6115-447e-bf2e-89d0a843f681-kube-api-access-bzg6v\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.185827 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/edd001b8-6115-447e-bf2e-89d0a843f681-public-tls-certs\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.185847 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/edd001b8-6115-447e-bf2e-89d0a843f681-logs\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.185912 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.185922 4929 
reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.185932 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.186297 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/edd001b8-6115-447e-bf2e-89d0a843f681-logs\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.190973 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/edd001b8-6115-447e-bf2e-89d0a843f681-public-tls-certs\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.192696 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/edd001b8-6115-447e-bf2e-89d0a843f681-config-data\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.193021 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/edd001b8-6115-447e-bf2e-89d0a843f681-internal-tls-certs\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.193398 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/edd001b8-6115-447e-bf2e-89d0a843f681-combined-ca-bundle\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.196509 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/edd001b8-6115-447e-bf2e-89d0a843f681-config-data-custom\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.204659 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzg6v\" (UniqueName: \"kubernetes.io/projected/edd001b8-6115-447e-bf2e-89d0a843f681-kube-api-access-bzg6v\") pod \"barbican-api-7486bd8978-4r6hg\" (UID: \"edd001b8-6115-447e-bf2e-89d0a843f681\") " pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.333190 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.384958 4929 generic.go:334] "Generic (PLEG): container finished" podID="8a027f20-aeb5-4af3-9ccc-c4271d8717d1" containerID="5dde2d3f2168bf10e3c514fbd3d384a32beed85e1e86d848393685a49e346cca" exitCode=0 Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.385065 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-q7499" event={"ID":"8a027f20-aeb5-4af3-9ccc-c4271d8717d1","Type":"ContainerDied","Data":"5dde2d3f2168bf10e3c514fbd3d384a32beed85e1e86d848393685a49e346cca"} Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.399190 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-r6lzk" event={"ID":"4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c","Type":"ContainerDied","Data":"060e83b3ff859ca1d132708edb3afd5bf4c90835c78638b1cef721d71983e564"} Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.399271 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="060e83b3ff859ca1d132708edb3afd5bf4c90835c78638b1cef721d71983e564" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.399281 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-r6lzk" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.399639 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-58957f86ff-4pswn" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.596202 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.596844 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerName="ceilometer-central-agent" containerID="cri-o://9901daf13cede55041f32ea5d8ac658df95c2d62a0cdd7628ac29cb0989ddd0e" gracePeriod=30 Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.596991 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerName="proxy-httpd" containerID="cri-o://2f7c9c608c81af699c1c03f4230ff74014aa0dd9095d58c7297dacc2d33f0fcd" gracePeriod=30 Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.597016 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerName="ceilometer-notification-agent" containerID="cri-o://1631db5cbb51990c077041a41225842f9331868e7422339568c865c72f4ce0a6" gracePeriod=30 Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.597034 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerName="sg-core" containerID="cri-o://8448c21157ce28870a3e3794f48d4bbd7558123e88575a3c9e18d35501314e02" gracePeriod=30 Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.681979 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.683707 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.686530 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-8sbpl" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.687636 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.687800 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.692840 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.720041 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.761050 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58957f86ff-4pswn"] Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.799687 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.799940 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-scripts\") pod \"cinder-scheduler-0\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.800051 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.800176 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-config-data\") pod \"cinder-scheduler-0\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.800310 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbfm2\" (UniqueName: \"kubernetes.io/projected/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-kube-api-access-vbfm2\") pod \"cinder-scheduler-0\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.800416 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.800596 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7bf4c8dd6c-btzlg"] Nov 22 07:48:40 crc 
kubenswrapper[4929]: I1122 07:48:40.802247 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.846243 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bf4c8dd6c-btzlg"] Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.905283 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-scripts\") pod \"cinder-scheduler-0\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.905339 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.905379 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-ovsdbserver-nb\") pod \"dnsmasq-dns-7bf4c8dd6c-btzlg\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.905421 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-config-data\") pod \"cinder-scheduler-0\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.905469 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-dns-svc\") pod \"dnsmasq-dns-7bf4c8dd6c-btzlg\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.905517 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbfm2\" (UniqueName: \"kubernetes.io/projected/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-kube-api-access-vbfm2\") pod \"cinder-scheduler-0\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.905546 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-config\") pod \"dnsmasq-dns-7bf4c8dd6c-btzlg\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.905627 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.905671 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" 
(UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-ovsdbserver-sb\") pod \"dnsmasq-dns-7bf4c8dd6c-btzlg\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.905730 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gggrk\" (UniqueName: \"kubernetes.io/projected/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-kube-api-access-gggrk\") pod \"dnsmasq-dns-7bf4c8dd6c-btzlg\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.905777 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-dns-swift-storage-0\") pod \"dnsmasq-dns-7bf4c8dd6c-btzlg\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.905829 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.905956 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.913354 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.916300 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-config-data\") pod \"cinder-scheduler-0\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.917716 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0"
Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.923955 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " pod="openstack/cinder-scheduler-0"
Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.924522 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.924781 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-scripts\") pod \"cinder-scheduler-0\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " pod="openstack/cinder-scheduler-0"
Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.925631 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " pod="openstack/cinder-scheduler-0"
Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.939239 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbfm2\" (UniqueName: \"kubernetes.io/projected/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-kube-api-access-vbfm2\") pod \"cinder-scheduler-0\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " pod="openstack/cinder-scheduler-0"
Nov 22 07:48:40 crc kubenswrapper[4929]: I1122 07:48:40.970271 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.012425 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.012482 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-ovsdbserver-nb\") pod \"dnsmasq-dns-7bf4c8dd6c-btzlg\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.012521 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mxqn\" (UniqueName: \"kubernetes.io/projected/6211e00a-48a8-43ff-a763-0a0fc09df943-kube-api-access-6mxqn\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.012545 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-config-data-custom\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.012567 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-dns-svc\") pod \"dnsmasq-dns-7bf4c8dd6c-btzlg\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.012619 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-config\") pod \"dnsmasq-dns-7bf4c8dd6c-btzlg\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.012668 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-scripts\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.012687 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6211e00a-48a8-43ff-a763-0a0fc09df943-logs\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.012705 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6211e00a-48a8-43ff-a763-0a0fc09df943-etc-machine-id\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.012731 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-ovsdbserver-sb\") pod \"dnsmasq-dns-7bf4c8dd6c-btzlg\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.012776 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-config-data\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.012795 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gggrk\" (UniqueName: \"kubernetes.io/projected/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-kube-api-access-gggrk\") pod \"dnsmasq-dns-7bf4c8dd6c-btzlg\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.012820 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-dns-swift-storage-0\") pod \"dnsmasq-dns-7bf4c8dd6c-btzlg\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.015943 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-ovsdbserver-nb\") pod \"dnsmasq-dns-7bf4c8dd6c-btzlg\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.016480 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-dns-svc\") pod \"dnsmasq-dns-7bf4c8dd6c-btzlg\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.017064 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-ovsdbserver-sb\") pod \"dnsmasq-dns-7bf4c8dd6c-btzlg\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.017297 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-config\") pod \"dnsmasq-dns-7bf4c8dd6c-btzlg\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.018000 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-dns-swift-storage-0\") pod \"dnsmasq-dns-7bf4c8dd6c-btzlg\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.018129 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.039919 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gggrk\" (UniqueName: \"kubernetes.io/projected/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-kube-api-access-gggrk\") pod \"dnsmasq-dns-7bf4c8dd6c-btzlg\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.045008 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.114908 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-scripts\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.114952 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6211e00a-48a8-43ff-a763-0a0fc09df943-logs\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.114975 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6211e00a-48a8-43ff-a763-0a0fc09df943-etc-machine-id\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.115029 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-config-data\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.115099 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.115161 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mxqn\" (UniqueName: \"kubernetes.io/projected/6211e00a-48a8-43ff-a763-0a0fc09df943-kube-api-access-6mxqn\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.115182 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-config-data-custom\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.119668 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6211e00a-48a8-43ff-a763-0a0fc09df943-etc-machine-id\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.129516 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6211e00a-48a8-43ff-a763-0a0fc09df943-logs\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.130584 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-scripts\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.134929 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-config-data-custom\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.149901 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.150491 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-config-data\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.156733 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mxqn\" (UniqueName: \"kubernetes.io/projected/6211e00a-48a8-43ff-a763-0a0fc09df943-kube-api-access-6mxqn\") pod \"cinder-api-0\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.427111 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.468053 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr" event={"ID":"cb931a9c-bada-431f-aaae-d3c1603b8d37","Type":"ContainerStarted","Data":"82d1d36c86b0cea6a2dcb164be02c9f615b6fdf0d6bae2d0ecf2317b5bd7bf12"}
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.482827 4929 generic.go:334] "Generic (PLEG): container finished" podID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerID="2f7c9c608c81af699c1c03f4230ff74014aa0dd9095d58c7297dacc2d33f0fcd" exitCode=0
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.482857 4929 generic.go:334] "Generic (PLEG): container finished" podID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerID="8448c21157ce28870a3e3794f48d4bbd7558123e88575a3c9e18d35501314e02" exitCode=2
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.482865 4929 generic.go:334] "Generic (PLEG): container finished" podID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerID="1631db5cbb51990c077041a41225842f9331868e7422339568c865c72f4ce0a6" exitCode=0
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.482874 4929 generic.go:334] "Generic (PLEG): container finished" podID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerID="9901daf13cede55041f32ea5d8ac658df95c2d62a0cdd7628ac29cb0989ddd0e" exitCode=0
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.483598 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"329c9bc2-50a1-427a-98b7-0f090f6b96a4","Type":"ContainerDied","Data":"2f7c9c608c81af699c1c03f4230ff74014aa0dd9095d58c7297dacc2d33f0fcd"}
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.483632 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"329c9bc2-50a1-427a-98b7-0f090f6b96a4","Type":"ContainerDied","Data":"8448c21157ce28870a3e3794f48d4bbd7558123e88575a3c9e18d35501314e02"}
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.483643 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"329c9bc2-50a1-427a-98b7-0f090f6b96a4","Type":"ContainerDied","Data":"1631db5cbb51990c077041a41225842f9331868e7422339568c865c72f4ce0a6"}
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.483653 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"329c9bc2-50a1-427a-98b7-0f090f6b96a4","Type":"ContainerDied","Data":"9901daf13cede55041f32ea5d8ac658df95c2d62a0cdd7628ac29cb0989ddd0e"}
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.633180 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7486bd8978-4r6hg"]
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.730746 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 22 07:48:41 crc kubenswrapper[4929]: I1122 07:48:41.971873 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bf4c8dd6c-btzlg"]
Nov 22 07:48:41 crc kubenswrapper[4929]: W1122 07:48:41.977425 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod59b1c0b3_02ff_4ab0_9708_7e3ec7035790.slice/crio-6d662b7412aeb2135cca061d4feb0ea87757280c69685dd09e92fb409051435b WatchSource:0}: Error finding container 6d662b7412aeb2135cca061d4feb0ea87757280c69685dd09e92fb409051435b: Status 404 returned error can't find the container with id 6d662b7412aeb2135cca061d4feb0ea87757280c69685dd09e92fb409051435b
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.265428 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.284937 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.295146 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-q7499"
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.357101 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-scripts\") pod \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") "
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.357169 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-ceilometer-tls-certs\") pod \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") "
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.357192 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-config-data\") pod \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\" (UID: \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\") "
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.357314 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ssbzk\" (UniqueName: \"kubernetes.io/projected/329c9bc2-50a1-427a-98b7-0f090f6b96a4-kube-api-access-ssbzk\") pod \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") "
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.357345 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-sg-core-conf-yaml\") pod \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") "
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.357388 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/329c9bc2-50a1-427a-98b7-0f090f6b96a4-log-httpd\") pod \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") "
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.357477 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-combined-ca-bundle\") pod \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\" (UID: \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\") "
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.357511 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/329c9bc2-50a1-427a-98b7-0f090f6b96a4-run-httpd\") pod \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") "
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.357613 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-combined-ca-bundle\") pod \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") "
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.357689 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-db-sync-config-data\") pod \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\" (UID: \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\") "
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.357724 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-config-data\") pod \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\" (UID: \"329c9bc2-50a1-427a-98b7-0f090f6b96a4\") "
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.357770 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fj8kh\" (UniqueName: \"kubernetes.io/projected/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-kube-api-access-fj8kh\") pod \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\" (UID: \"8a027f20-aeb5-4af3-9ccc-c4271d8717d1\") "
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.359503 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/329c9bc2-50a1-427a-98b7-0f090f6b96a4-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "329c9bc2-50a1-427a-98b7-0f090f6b96a4" (UID: "329c9bc2-50a1-427a-98b7-0f090f6b96a4"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.362939 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/329c9bc2-50a1-427a-98b7-0f090f6b96a4-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "329c9bc2-50a1-427a-98b7-0f090f6b96a4" (UID: "329c9bc2-50a1-427a-98b7-0f090f6b96a4"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.369872 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-scripts" (OuterVolumeSpecName: "scripts") pod "329c9bc2-50a1-427a-98b7-0f090f6b96a4" (UID: "329c9bc2-50a1-427a-98b7-0f090f6b96a4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.369904 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-kube-api-access-fj8kh" (OuterVolumeSpecName: "kube-api-access-fj8kh") pod "8a027f20-aeb5-4af3-9ccc-c4271d8717d1" (UID: "8a027f20-aeb5-4af3-9ccc-c4271d8717d1"). InnerVolumeSpecName "kube-api-access-fj8kh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.369965 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "8a027f20-aeb5-4af3-9ccc-c4271d8717d1" (UID: "8a027f20-aeb5-4af3-9ccc-c4271d8717d1"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.372831 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/329c9bc2-50a1-427a-98b7-0f090f6b96a4-kube-api-access-ssbzk" (OuterVolumeSpecName: "kube-api-access-ssbzk") pod "329c9bc2-50a1-427a-98b7-0f090f6b96a4" (UID: "329c9bc2-50a1-427a-98b7-0f090f6b96a4"). InnerVolumeSpecName "kube-api-access-ssbzk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.460868 4929 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.460947 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fj8kh\" (UniqueName: \"kubernetes.io/projected/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-kube-api-access-fj8kh\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.460977 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.460988 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ssbzk\" (UniqueName: \"kubernetes.io/projected/329c9bc2-50a1-427a-98b7-0f090f6b96a4-kube-api-access-ssbzk\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.460998 4929 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/329c9bc2-50a1-427a-98b7-0f090f6b96a4-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.461008 4929 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/329c9bc2-50a1-427a-98b7-0f090f6b96a4-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.480828 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "329c9bc2-50a1-427a-98b7-0f090f6b96a4" (UID: "329c9bc2-50a1-427a-98b7-0f090f6b96a4"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.498029 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-865648cb87-24j78" event={"ID":"8ba5625d-6646-4b1c-aace-040845557c79","Type":"ContainerStarted","Data":"1f0402d3f3db994ab024e56cd44938620df58cf25af9347b1ca5052b7689b674"}
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.498082 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-865648cb87-24j78" event={"ID":"8ba5625d-6646-4b1c-aace-040845557c79","Type":"ContainerStarted","Data":"c1638cf25f7c41a2e696c378556c3f90b124e3a6b0eb71a78ea000f126e4b7d8"}
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.520995 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"329c9bc2-50a1-427a-98b7-0f090f6b96a4","Type":"ContainerDied","Data":"4d4b12d017bc72e78eb9a6ed927010faeb885ae57935ee4fd86274608a426175"}
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.521052 4929 scope.go:117] "RemoveContainer" containerID="2f7c9c608c81af699c1c03f4230ff74014aa0dd9095d58c7297dacc2d33f0fcd"
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.521180 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.529513 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6211e00a-48a8-43ff-a763-0a0fc09df943","Type":"ContainerStarted","Data":"e7c452a481dffd6c16b9b929d020ef9c519c66674fb4e3a58e510d1439a76949"}
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.532146 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-865648cb87-24j78" podStartSLOduration=3.133123054 podStartE2EDuration="6.532088139s" podCreationTimestamp="2025-11-22 07:48:36 +0000 UTC" firstStartedPulling="2025-11-22 07:48:37.530425175 +0000 UTC m=+2254.639879188" lastFinishedPulling="2025-11-22 07:48:40.92939027 +0000 UTC m=+2258.038844273" observedRunningTime="2025-11-22 07:48:42.529721131 +0000 UTC m=+2259.639175144" watchObservedRunningTime="2025-11-22 07:48:42.532088139 +0000 UTC m=+2259.641542152"
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.535061 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr" event={"ID":"cb931a9c-bada-431f-aaae-d3c1603b8d37","Type":"ContainerStarted","Data":"9fb01ebae2e6af009e9d7b425b5c2046179c2a73df4c3fce83fa54062a0dfd7f"}
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.541451 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg" event={"ID":"59b1c0b3-02ff-4ab0-9708-7e3ec7035790","Type":"ContainerStarted","Data":"6d662b7412aeb2135cca061d4feb0ea87757280c69685dd09e92fb409051435b"}
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.547442 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7486bd8978-4r6hg" event={"ID":"edd001b8-6115-447e-bf2e-89d0a843f681","Type":"ContainerStarted","Data":"50ce40a627ccc31d0195e768535ac027e79eb08329446baa6d1577ea9bad4bd3"}
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.547512 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7486bd8978-4r6hg" event={"ID":"edd001b8-6115-447e-bf2e-89d0a843f681","Type":"ContainerStarted","Data":"65c4f65c4f48e66db0eb6b4ab306e806aefa93ee3352cfa3f9ccdf155e8c31c4"}
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.547528 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7486bd8978-4r6hg" event={"ID":"edd001b8-6115-447e-bf2e-89d0a843f681","Type":"ContainerStarted","Data":"b2ee45839465d3559dd2afc92e8f5c7c772ae09ac8759302544384f9eabe2c41"}
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.548528 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7486bd8978-4r6hg"
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.548576 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7486bd8978-4r6hg"
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.551749 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-q7499" event={"ID":"8a027f20-aeb5-4af3-9ccc-c4271d8717d1","Type":"ContainerDied","Data":"0fcf1d9e65a9e5d874c4fbe25d518af6331f955ee337bfa68e9799de1a783e80"}
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.551777 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0fcf1d9e65a9e5d874c4fbe25d518af6331f955ee337bfa68e9799de1a783e80"
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.551821 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-q7499"
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.562937 4929 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.569998 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-58957f86ff-4pswn" podUID="91ce7752-be0e-4efe-8ab5-91312f1e8c5a" containerName="dnsmasq-dns" containerID="cri-o://b95a78816fd7a5bde0bdc2127ffccd44cd070ccea0d386bac84c73bcb28bc802" gracePeriod=10
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.570341 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3","Type":"ContainerStarted","Data":"712ae19f01377c7ab8dfba4ed3bf0ba7ec9eea2cb3d447f9925a60ca91ba27b4"}
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.592005 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-6dc6c45b7d-zjztr" podStartSLOduration=3.082941779 podStartE2EDuration="6.591979676s" podCreationTimestamp="2025-11-22 07:48:36 +0000 UTC" firstStartedPulling="2025-11-22 07:48:37.418452906 +0000 UTC m=+2254.527906909" lastFinishedPulling="2025-11-22 07:48:40.927490793 +0000 UTC m=+2258.036944806" observedRunningTime="2025-11-22 07:48:42.55710077 +0000 UTC m=+2259.666554793" watchObservedRunningTime="2025-11-22 07:48:42.591979676 +0000 UTC m=+2259.701433689"
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.607036 4929 scope.go:117] "RemoveContainer" containerID="8448c21157ce28870a3e3794f48d4bbd7558123e88575a3c9e18d35501314e02"
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.631026 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7486bd8978-4r6hg" podStartSLOduration=3.631002084 podStartE2EDuration="3.631002084s" podCreationTimestamp="2025-11-22 07:48:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:48:42.582189283 +0000 UTC m=+2259.691643296" watchObservedRunningTime="2025-11-22 07:48:42.631002084 +0000 UTC m=+2259.740456097"
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.664377 4929 scope.go:117] "RemoveContainer" containerID="1631db5cbb51990c077041a41225842f9331868e7422339568c865c72f4ce0a6"
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.664553 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8a027f20-aeb5-4af3-9ccc-c4271d8717d1" (UID: "8a027f20-aeb5-4af3-9ccc-c4271d8717d1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.665071 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.714406 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-config-data" (OuterVolumeSpecName: "config-data") pod "8a027f20-aeb5-4af3-9ccc-c4271d8717d1" (UID: "8a027f20-aeb5-4af3-9ccc-c4271d8717d1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.744406 4929 scope.go:117] "RemoveContainer" containerID="9901daf13cede55041f32ea5d8ac658df95c2d62a0cdd7628ac29cb0989ddd0e"
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.793729 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8a027f20-aeb5-4af3-9ccc-c4271d8717d1-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.796472 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "329c9bc2-50a1-427a-98b7-0f090f6b96a4" (UID: "329c9bc2-50a1-427a-98b7-0f090f6b96a4"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.900869 4929 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:42 crc kubenswrapper[4929]: I1122 07:48:42.931806 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bf4c8dd6c-btzlg"]
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.035592 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-795f4db4bc-jr28r"]
Nov 22 07:48:43 crc kubenswrapper[4929]: E1122 07:48:43.036031 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerName="ceilometer-central-agent"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.036048 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerName="ceilometer-central-agent"
Nov 22 07:48:43 crc kubenswrapper[4929]: E1122 07:48:43.036064 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerName="ceilometer-notification-agent"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.036071 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerName="ceilometer-notification-agent"
Nov 22 07:48:43 crc kubenswrapper[4929]: E1122 07:48:43.036083 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a027f20-aeb5-4af3-9ccc-c4271d8717d1" containerName="glance-db-sync"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.036090 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a027f20-aeb5-4af3-9ccc-c4271d8717d1" containerName="glance-db-sync"
Nov 22 07:48:43 crc kubenswrapper[4929]: E1122 07:48:43.036106 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerName="sg-core"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.036112 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerName="sg-core"
Nov 22 07:48:43 crc kubenswrapper[4929]: E1122 07:48:43.036121 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerName="proxy-httpd"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.036127 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerName="proxy-httpd"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.036333 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a027f20-aeb5-4af3-9ccc-c4271d8717d1" containerName="glance-db-sync"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.036351 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerName="ceilometer-central-agent"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.036361 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerName="sg-core"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.036388 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerName="proxy-httpd"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.036400 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" containerName="ceilometer-notification-agent"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.037566 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "329c9bc2-50a1-427a-98b7-0f090f6b96a4" (UID: "329c9bc2-50a1-427a-98b7-0f090f6b96a4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.058737 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.088041 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-config-data" (OuterVolumeSpecName: "config-data") pod "329c9bc2-50a1-427a-98b7-0f090f6b96a4" (UID: "329c9bc2-50a1-427a-98b7-0f090f6b96a4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.133937 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-dns-swift-storage-0\") pod \"dnsmasq-dns-795f4db4bc-jr28r\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") " pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.133989 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-dns-svc\") pod \"dnsmasq-dns-795f4db4bc-jr28r\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") " pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.134012 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-config\") pod \"dnsmasq-dns-795f4db4bc-jr28r\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") " pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.134084 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-ovsdbserver-sb\") pod \"dnsmasq-dns-795f4db4bc-jr28r\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") " pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.134112 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhbk9\" (UniqueName: \"kubernetes.io/projected/08943094-b174-4891-80da-386b43fa40c6-kube-api-access-bhbk9\") pod \"dnsmasq-dns-795f4db4bc-jr28r\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") " pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.134146 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-ovsdbserver-nb\") pod \"dnsmasq-dns-795f4db4bc-jr28r\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") " pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.134193 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.134204 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/329c9bc2-50a1-427a-98b7-0f090f6b96a4-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.141359 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-795f4db4bc-jr28r"]
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.236197 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-dns-swift-storage-0\") pod \"dnsmasq-dns-795f4db4bc-jr28r\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") " pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.236257 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-dns-svc\") pod \"dnsmasq-dns-795f4db4bc-jr28r\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") " pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.236286 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-config\") pod \"dnsmasq-dns-795f4db4bc-jr28r\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") " pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.236354 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-ovsdbserver-sb\") pod \"dnsmasq-dns-795f4db4bc-jr28r\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") " pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.236380 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhbk9\" (UniqueName: \"kubernetes.io/projected/08943094-b174-4891-80da-386b43fa40c6-kube-api-access-bhbk9\") pod \"dnsmasq-dns-795f4db4bc-jr28r\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") " pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.236409 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-ovsdbserver-nb\") pod \"dnsmasq-dns-795f4db4bc-jr28r\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") " pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.237277 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-ovsdbserver-nb\") pod \"dnsmasq-dns-795f4db4bc-jr28r\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") " pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.237542 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-config\") pod \"dnsmasq-dns-795f4db4bc-jr28r\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") " pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.237927 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-ovsdbserver-sb\") pod \"dnsmasq-dns-795f4db4bc-jr28r\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") " pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.238147 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-dns-swift-storage-0\") pod \"dnsmasq-dns-795f4db4bc-jr28r\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") " pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.238778 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-dns-svc\") pod \"dnsmasq-dns-795f4db4bc-jr28r\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") " pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.273164 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhbk9\" (UniqueName: \"kubernetes.io/projected/08943094-b174-4891-80da-386b43fa40c6-kube-api-access-bhbk9\") pod \"dnsmasq-dns-795f4db4bc-jr28r\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") " pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.405014 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.511315 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.513380 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.534464 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.551023 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.587312 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.612118 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.619860 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.620009 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.654516 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.654571 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4e20beb-f178-437e-a8db-2138531b8aab-log-httpd\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.654612 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.654640 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-config-data\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.654712 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.654758 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4e20beb-f178-437e-a8db-2138531b8aab-run-httpd\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.654787 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-scripts\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.654820 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgr9l\" (UniqueName: \"kubernetes.io/projected/c4e20beb-f178-437e-a8db-2138531b8aab-kube-api-access-rgr9l\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.669519 4929 generic.go:334] "Generic (PLEG): container finished" podID="91ce7752-be0e-4efe-8ab5-91312f1e8c5a" containerID="b95a78816fd7a5bde0bdc2127ffccd44cd070ccea0d386bac84c73bcb28bc802" exitCode=0
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.669652 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58957f86ff-4pswn" event={"ID":"91ce7752-be0e-4efe-8ab5-91312f1e8c5a","Type":"ContainerDied","Data":"b95a78816fd7a5bde0bdc2127ffccd44cd070ccea0d386bac84c73bcb28bc802"}
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.698018 4929 generic.go:334] "Generic (PLEG): container finished" podID="59b1c0b3-02ff-4ab0-9708-7e3ec7035790" containerID="51ad19043808b71d755b018d8d85cbf4e65b31ba04768d75d478c5e41b7914cd" exitCode=0
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.698815 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg" event={"ID":"59b1c0b3-02ff-4ab0-9708-7e3ec7035790","Type":"ContainerDied","Data":"51ad19043808b71d755b018d8d85cbf4e65b31ba04768d75d478c5e41b7914cd"}
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.741846 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58957f86ff-4pswn"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.756598 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.756649 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4e20beb-f178-437e-a8db-2138531b8aab-log-httpd\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.756679 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.756699 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-config-data\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.756760 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.756801 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4e20beb-f178-437e-a8db-2138531b8aab-run-httpd\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.756828 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-scripts\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.756854 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgr9l\" (UniqueName: \"kubernetes.io/projected/c4e20beb-f178-437e-a8db-2138531b8aab-kube-api-access-rgr9l\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.758291 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4e20beb-f178-437e-a8db-2138531b8aab-log-httpd\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.758335 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4e20beb-f178-437e-a8db-2138531b8aab-run-httpd\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.764015 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-config-data\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.764623 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.766412 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.767157 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.769069 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-scripts\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.796120 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgr9l\" (UniqueName: \"kubernetes.io/projected/c4e20beb-f178-437e-a8db-2138531b8aab-kube-api-access-rgr9l\") pod \"ceilometer-0\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") " pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.859887 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-ovsdbserver-sb\") pod \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") "
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.859935 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-dns-svc\") pod \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") "
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.859976 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xc4s7\" (UniqueName: \"kubernetes.io/projected/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-kube-api-access-xc4s7\") pod \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") "
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.860018 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-ovsdbserver-nb\") pod \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") "
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.860255 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-dns-swift-storage-0\") pod \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") "
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.860297 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-config\") pod \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\" (UID: \"91ce7752-be0e-4efe-8ab5-91312f1e8c5a\") "
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.870650 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-kube-api-access-xc4s7" (OuterVolumeSpecName: "kube-api-access-xc4s7") pod "91ce7752-be0e-4efe-8ab5-91312f1e8c5a" (UID: "91ce7752-be0e-4efe-8ab5-91312f1e8c5a"). InnerVolumeSpecName "kube-api-access-xc4s7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.945923 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.957687 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "91ce7752-be0e-4efe-8ab5-91312f1e8c5a" (UID: "91ce7752-be0e-4efe-8ab5-91312f1e8c5a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.962909 4929 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.962932 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xc4s7\" (UniqueName: \"kubernetes.io/projected/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-kube-api-access-xc4s7\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:43 crc kubenswrapper[4929]: I1122 07:48:43.986789 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="329c9bc2-50a1-427a-98b7-0f090f6b96a4" path="/var/lib/kubelet/pods/329c9bc2-50a1-427a-98b7-0f090f6b96a4/volumes"
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.024281 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-config" (OuterVolumeSpecName: "config") pod "91ce7752-be0e-4efe-8ab5-91312f1e8c5a" (UID: "91ce7752-be0e-4efe-8ab5-91312f1e8c5a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.039712 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "91ce7752-be0e-4efe-8ab5-91312f1e8c5a" (UID: "91ce7752-be0e-4efe-8ab5-91312f1e8c5a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.051547 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "91ce7752-be0e-4efe-8ab5-91312f1e8c5a" (UID: "91ce7752-be0e-4efe-8ab5-91312f1e8c5a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.071781 4929 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.071817 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-config\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.071829 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.111433 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "91ce7752-be0e-4efe-8ab5-91312f1e8c5a" (UID: "91ce7752-be0e-4efe-8ab5-91312f1e8c5a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.163558 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 22 07:48:44 crc kubenswrapper[4929]: E1122 07:48:44.164178 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91ce7752-be0e-4efe-8ab5-91312f1e8c5a" containerName="init"
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.164195 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="91ce7752-be0e-4efe-8ab5-91312f1e8c5a" containerName="init"
Nov 22 07:48:44 crc kubenswrapper[4929]: E1122 07:48:44.164237 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91ce7752-be0e-4efe-8ab5-91312f1e8c5a" containerName="dnsmasq-dns"
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.164247 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="91ce7752-be0e-4efe-8ab5-91312f1e8c5a" containerName="dnsmasq-dns"
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.164567 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="91ce7752-be0e-4efe-8ab5-91312f1e8c5a" containerName="dnsmasq-dns"
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.166268 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.166313 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.167798 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.167896 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.169600 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.173646 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.173857 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.173945 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-gdhfj"
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.178061 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/91ce7752-be0e-4efe-8ab5-91312f1e8c5a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.178751 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.280101 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee6e3776-d7fa-44ad-a388-dabd60eece61-logs\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0"
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.282897 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4be58485-cd5a-49c4-8471-d1013862e52d-scripts\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.293516 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0"
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.293629 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4be58485-cd5a-49c4-8471-d1013862e52d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.293811 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4be58485-cd5a-49c4-8471-d1013862e52d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.293859 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4be58485-cd5a-49c4-8471-d1013862e52d-config-data\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\")
" pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.293885 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee6e3776-d7fa-44ad-a388-dabd60eece61-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.293920 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ee6e3776-d7fa-44ad-a388-dabd60eece61-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.293985 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4be58485-cd5a-49c4-8471-d1013862e52d-logs\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.294019 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjrh2\" (UniqueName: \"kubernetes.io/projected/4be58485-cd5a-49c4-8471-d1013862e52d-kube-api-access-zjrh2\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.294047 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee6e3776-d7fa-44ad-a388-dabd60eece61-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.294087 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.294124 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee6e3776-d7fa-44ad-a388-dabd60eece61-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.294146 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rd4z6\" (UniqueName: \"kubernetes.io/projected/ee6e3776-d7fa-44ad-a388-dabd60eece61-kube-api-access-rd4z6\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: E1122 07:48:44.348656 4929 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Nov 22 07:48:44 crc kubenswrapper[4929]: rpc error: code = Unknown desc = container create failed: mount 
`/var/lib/kubelet/pods/59b1c0b3-02ff-4ab0-9708-7e3ec7035790/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 22 07:48:44 crc kubenswrapper[4929]: > podSandboxID="6d662b7412aeb2135cca061d4feb0ea87757280c69685dd09e92fb409051435b" Nov 22 07:48:44 crc kubenswrapper[4929]: E1122 07:48:44.348912 4929 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 22 07:48:44 crc kubenswrapper[4929]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68dh5cfh575h554hbch58ch664h545h57ch78h589h555h57bh9ch589h5c8h79h574h57bh68ch556h89h65h5f8h558h687h68fh666h658h87h665h65cq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-swift-storage-0,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-swift-storage-0,SubPath:dns-swift-storage-0,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-nb,SubPath:ovsdbserver-nb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-sb,SubPath:ovsdbserver-sb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gggrk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} 
start failed in pod dnsmasq-dns-7bf4c8dd6c-btzlg_openstack(59b1c0b3-02ff-4ab0-9708-7e3ec7035790): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/59b1c0b3-02ff-4ab0-9708-7e3ec7035790/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 22 07:48:44 crc kubenswrapper[4929]: > logger="UnhandledError" Nov 22 07:48:44 crc kubenswrapper[4929]: E1122 07:48:44.350117 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/59b1c0b3-02ff-4ab0-9708-7e3ec7035790/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg" podUID="59b1c0b3-02ff-4ab0-9708-7e3ec7035790" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.395706 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4be58485-cd5a-49c4-8471-d1013862e52d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.396061 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee6e3776-d7fa-44ad-a388-dabd60eece61-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.396084 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4be58485-cd5a-49c4-8471-d1013862e52d-config-data\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.396113 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ee6e3776-d7fa-44ad-a388-dabd60eece61-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.396161 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4be58485-cd5a-49c4-8471-d1013862e52d-logs\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.396183 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjrh2\" (UniqueName: \"kubernetes.io/projected/4be58485-cd5a-49c4-8471-d1013862e52d-kube-api-access-zjrh2\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.396239 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee6e3776-d7fa-44ad-a388-dabd60eece61-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " 
pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.396265 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.396298 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee6e3776-d7fa-44ad-a388-dabd60eece61-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.396318 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rd4z6\" (UniqueName: \"kubernetes.io/projected/ee6e3776-d7fa-44ad-a388-dabd60eece61-kube-api-access-rd4z6\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.396344 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee6e3776-d7fa-44ad-a388-dabd60eece61-logs\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.396402 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4be58485-cd5a-49c4-8471-d1013862e52d-scripts\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.396491 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.396535 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4be58485-cd5a-49c4-8471-d1013862e52d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.397445 4929 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.397541 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4be58485-cd5a-49c4-8471-d1013862e52d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.397582 4929 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4be58485-cd5a-49c4-8471-d1013862e52d-logs\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.397761 4929 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.398080 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee6e3776-d7fa-44ad-a388-dabd60eece61-logs\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.398129 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ee6e3776-d7fa-44ad-a388-dabd60eece61-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.453017 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4be58485-cd5a-49c4-8471-d1013862e52d-scripts\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.453224 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4be58485-cd5a-49c4-8471-d1013862e52d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.456430 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4be58485-cd5a-49c4-8471-d1013862e52d-config-data\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.457103 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee6e3776-d7fa-44ad-a388-dabd60eece61-scripts\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.459592 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rd4z6\" (UniqueName: \"kubernetes.io/projected/ee6e3776-d7fa-44ad-a388-dabd60eece61-kube-api-access-rd4z6\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.459861 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") 
pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.459911 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee6e3776-d7fa-44ad-a388-dabd60eece61-config-data\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.460566 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjrh2\" (UniqueName: \"kubernetes.io/projected/4be58485-cd5a-49c4-8471-d1013862e52d-kube-api-access-zjrh2\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.493591 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee6e3776-d7fa-44ad-a388-dabd60eece61-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.544860 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.553613 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.588718 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.626102 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-795f4db4bc-jr28r"] Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.719896 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6211e00a-48a8-43ff-a763-0a0fc09df943","Type":"ContainerStarted","Data":"039b1d2f705f529960c8eeb7a6f5ff6a07428a21ebb2d84e54c52900426385ad"} Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.727092 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-795f4db4bc-jr28r" event={"ID":"08943094-b174-4891-80da-386b43fa40c6","Type":"ContainerStarted","Data":"92c7b3d61198dc0208ccb8baec836ec66a2759b816097f7cf08b04c57734d9cb"} Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.732521 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3","Type":"ContainerStarted","Data":"041248002a0f79e917887cea403ae43aa0320cf65136be65cc42d7964baa02ef"} Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.737535 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58957f86ff-4pswn" event={"ID":"91ce7752-be0e-4efe-8ab5-91312f1e8c5a","Type":"ContainerDied","Data":"e4751e26062feaeeaf04752146fc63074005c4d0083aa9c1a1a67e6398e4c483"} Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.737595 4929 scope.go:117] "RemoveContainer" containerID="b95a78816fd7a5bde0bdc2127ffccd44cd070ccea0d386bac84c73bcb28bc802" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.738762 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58957f86ff-4pswn" Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.845292 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58957f86ff-4pswn"] Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.856495 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:48:44 crc kubenswrapper[4929]: I1122 07:48:44.867851 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-58957f86ff-4pswn"] Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.014442 4929 scope.go:117] "RemoveContainer" containerID="548ed429b9974bd0174fbd541ede91b8da048d8c1b7e74429141a9c2c8113f73" Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.541612 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.581563 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg" Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.658197 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-config\") pod \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.658273 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gggrk\" (UniqueName: \"kubernetes.io/projected/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-kube-api-access-gggrk\") pod \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.658336 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-dns-svc\") pod \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.658400 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-ovsdbserver-nb\") pod \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.658426 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-ovsdbserver-sb\") pod \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.658466 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-dns-swift-storage-0\") pod \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\" (UID: \"59b1c0b3-02ff-4ab0-9708-7e3ec7035790\") " Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.673019 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-kube-api-access-gggrk" (OuterVolumeSpecName: "kube-api-access-gggrk") pod "59b1c0b3-02ff-4ab0-9708-7e3ec7035790" (UID: "59b1c0b3-02ff-4ab0-9708-7e3ec7035790"). InnerVolumeSpecName "kube-api-access-gggrk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.759615 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg" Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.759644 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bf4c8dd6c-btzlg" event={"ID":"59b1c0b3-02ff-4ab0-9708-7e3ec7035790","Type":"ContainerDied","Data":"6d662b7412aeb2135cca061d4feb0ea87757280c69685dd09e92fb409051435b"} Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.760017 4929 scope.go:117] "RemoveContainer" containerID="51ad19043808b71d755b018d8d85cbf4e65b31ba04768d75d478c5e41b7914cd" Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.761631 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gggrk\" (UniqueName: \"kubernetes.io/projected/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-kube-api-access-gggrk\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.763026 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4be58485-cd5a-49c4-8471-d1013862e52d","Type":"ContainerStarted","Data":"b35a01523b0a61c2d10ff927db42146d35538f8263d58e37888f32f02797e187"} Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.774745 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "59b1c0b3-02ff-4ab0-9708-7e3ec7035790" (UID: "59b1c0b3-02ff-4ab0-9708-7e3ec7035790"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.777618 4929 generic.go:334] "Generic (PLEG): container finished" podID="08943094-b174-4891-80da-386b43fa40c6" containerID="b4482b450cf2f85ec75e4bb17c90708a8e4aa915651ddef297737c543c3c717a" exitCode=0 Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.777682 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-795f4db4bc-jr28r" event={"ID":"08943094-b174-4891-80da-386b43fa40c6","Type":"ContainerDied","Data":"b4482b450cf2f85ec75e4bb17c90708a8e4aa915651ddef297737c543c3c717a"} Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.811632 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c4e20beb-f178-437e-a8db-2138531b8aab","Type":"ContainerStarted","Data":"d9498f0408539ef76fb8ca6f1466f4ca5e445655530fb7a02e0baa02c3b49665"} Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.877045 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.888378 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "59b1c0b3-02ff-4ab0-9708-7e3ec7035790" (UID: "59b1c0b3-02ff-4ab0-9708-7e3ec7035790"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:48:45 crc kubenswrapper[4929]: I1122 07:48:45.979098 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.012488 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91ce7752-be0e-4efe-8ab5-91312f1e8c5a" path="/var/lib/kubelet/pods/91ce7752-be0e-4efe-8ab5-91312f1e8c5a/volumes" Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.108597 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-config" (OuterVolumeSpecName: "config") pod "59b1c0b3-02ff-4ab0-9708-7e3ec7035790" (UID: "59b1c0b3-02ff-4ab0-9708-7e3ec7035790"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.182164 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.189242 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "59b1c0b3-02ff-4ab0-9708-7e3ec7035790" (UID: "59b1c0b3-02ff-4ab0-9708-7e3ec7035790"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.190556 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "59b1c0b3-02ff-4ab0-9708-7e3ec7035790" (UID: "59b1c0b3-02ff-4ab0-9708-7e3ec7035790"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.284812 4929 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.284853 4929 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/59b1c0b3-02ff-4ab0-9708-7e3ec7035790-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.488842 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.606032 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bf4c8dd6c-btzlg"] Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.621337 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7bf4c8dd6c-btzlg"] Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.840383 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6211e00a-48a8-43ff-a763-0a0fc09df943","Type":"ContainerStarted","Data":"e46e9ef7d90478c5695cddd6854190d150b640961648a25fe85c8e7fa5cd88bd"} Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.840737 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.854453 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-795f4db4bc-jr28r" event={"ID":"08943094-b174-4891-80da-386b43fa40c6","Type":"ContainerStarted","Data":"6e0180514f75f36d512ef82c11723603a0b065dfa813eaedc7ccc5f2491902df"} Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.854650 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-795f4db4bc-jr28r" Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.862411 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c4e20beb-f178-437e-a8db-2138531b8aab","Type":"ContainerStarted","Data":"79b8401c0bd5f3d539c26d03616cb8e0b806ddef9b434740abfec2b172910800"} Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.863260 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=6.86318524 podStartE2EDuration="6.86318524s" podCreationTimestamp="2025-11-22 07:48:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:48:46.859297304 +0000 UTC m=+2263.968751327" watchObservedRunningTime="2025-11-22 07:48:46.86318524 +0000 UTC m=+2263.972639253" Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.902502 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-795f4db4bc-jr28r" podStartSLOduration=4.9024803850000005 podStartE2EDuration="4.902480385s" podCreationTimestamp="2025-11-22 07:48:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:48:46.897969733 +0000 UTC m=+2264.007423746" watchObservedRunningTime="2025-11-22 07:48:46.902480385 +0000 UTC m=+2264.011934398" Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.906226 4929 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ee6e3776-d7fa-44ad-a388-dabd60eece61","Type":"ContainerStarted","Data":"0570d8e5229cce06b1ab4feda27516a1ba9b491b09382d026314787be9fa04a8"} Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.915184 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3","Type":"ContainerStarted","Data":"dfe2f533eb5949d42cb46c622d8b68c90feee37ec487852c01b681bb3be739f8"} Nov 22 07:48:46 crc kubenswrapper[4929]: I1122 07:48:46.956957 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=6.11458423 podStartE2EDuration="6.956936917s" podCreationTimestamp="2025-11-22 07:48:40 +0000 UTC" firstStartedPulling="2025-11-22 07:48:41.767510473 +0000 UTC m=+2258.876964486" lastFinishedPulling="2025-11-22 07:48:42.60986316 +0000 UTC m=+2259.719317173" observedRunningTime="2025-11-22 07:48:46.956341952 +0000 UTC m=+2264.065795965" watchObservedRunningTime="2025-11-22 07:48:46.956936917 +0000 UTC m=+2264.066390930" Nov 22 07:48:47 crc kubenswrapper[4929]: I1122 07:48:47.059721 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 22 07:48:47 crc kubenswrapper[4929]: I1122 07:48:47.932107 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c4e20beb-f178-437e-a8db-2138531b8aab","Type":"ContainerStarted","Data":"0da3600328d3a040b87cb189c16aaf8379a7bc8d0ee3e69106490d915c8f6f42"} Nov 22 07:48:47 crc kubenswrapper[4929]: I1122 07:48:47.935876 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ee6e3776-d7fa-44ad-a388-dabd60eece61","Type":"ContainerStarted","Data":"94239900a4338a088d4f1025afe63b67da619e9fd463b37b0221572777911743"} Nov 22 07:48:47 crc kubenswrapper[4929]: I1122 07:48:47.941303 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4be58485-cd5a-49c4-8471-d1013862e52d","Type":"ContainerStarted","Data":"2da1966bea9be7e20c6536d6dfabcf7887a16d7a4729d35c014caccf5dd9f622"} Nov 22 07:48:48 crc kubenswrapper[4929]: I1122 07:48:48.007155 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59b1c0b3-02ff-4ab0-9708-7e3ec7035790" path="/var/lib/kubelet/pods/59b1c0b3-02ff-4ab0-9708-7e3ec7035790/volumes" Nov 22 07:48:48 crc kubenswrapper[4929]: I1122 07:48:48.281422 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-68cf69f54-cvmvj" podUID="494b6c5c-2251-443b-bd47-fa903a139c47" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.191:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 07:48:48 crc kubenswrapper[4929]: I1122 07:48:48.282165 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-68cf69f54-cvmvj" podUID="494b6c5c-2251-443b-bd47-fa903a139c47" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.191:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 07:48:48 crc kubenswrapper[4929]: I1122 07:48:48.304346 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:48:48 crc kubenswrapper[4929]: I1122 07:48:48.320859 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/glance-default-external-api-0"] Nov 22 07:48:48 crc kubenswrapper[4929]: I1122 07:48:48.422758 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 07:48:48 crc kubenswrapper[4929]: I1122 07:48:48.959164 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c4e20beb-f178-437e-a8db-2138531b8aab","Type":"ContainerStarted","Data":"8b16fefa582fe2cecb48c3f39af4100cc6e39b10d7b91acffc272e1552e51282"} Nov 22 07:48:48 crc kubenswrapper[4929]: I1122 07:48:48.962863 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ee6e3776-d7fa-44ad-a388-dabd60eece61","Type":"ContainerStarted","Data":"a023cc31dc605a493da2b881a0b6ed4aa8c35206439e896c1ac816d8c66adbf6"} Nov 22 07:48:48 crc kubenswrapper[4929]: I1122 07:48:48.963123 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="ee6e3776-d7fa-44ad-a388-dabd60eece61" containerName="glance-httpd" containerID="cri-o://a023cc31dc605a493da2b881a0b6ed4aa8c35206439e896c1ac816d8c66adbf6" gracePeriod=30 Nov 22 07:48:48 crc kubenswrapper[4929]: I1122 07:48:48.963652 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="ee6e3776-d7fa-44ad-a388-dabd60eece61" containerName="glance-log" containerID="cri-o://94239900a4338a088d4f1025afe63b67da619e9fd463b37b0221572777911743" gracePeriod=30 Nov 22 07:48:48 crc kubenswrapper[4929]: I1122 07:48:48.968638 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4be58485-cd5a-49c4-8471-d1013862e52d","Type":"ContainerStarted","Data":"7324f44ba140ac1ee7a731d97849c71cbb503e3755550a4f89dd705bc8585209"} Nov 22 07:48:48 crc kubenswrapper[4929]: I1122 07:48:48.968695 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="6211e00a-48a8-43ff-a763-0a0fc09df943" containerName="cinder-api-log" containerID="cri-o://039b1d2f705f529960c8eeb7a6f5ff6a07428a21ebb2d84e54c52900426385ad" gracePeriod=30 Nov 22 07:48:48 crc kubenswrapper[4929]: I1122 07:48:48.968799 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="6211e00a-48a8-43ff-a763-0a0fc09df943" containerName="cinder-api" containerID="cri-o://e46e9ef7d90478c5695cddd6854190d150b640961648a25fe85c8e7fa5cd88bd" gracePeriod=30 Nov 22 07:48:48 crc kubenswrapper[4929]: I1122 07:48:48.996998 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.996979063 podStartE2EDuration="5.996979063s" podCreationTimestamp="2025-11-22 07:48:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:48:48.994341887 +0000 UTC m=+2266.103795900" watchObservedRunningTime="2025-11-22 07:48:48.996979063 +0000 UTC m=+2266.106433076" Nov 22 07:48:49 crc kubenswrapper[4929]: I1122 07:48:49.049852 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=7.049833095 podStartE2EDuration="7.049833095s" podCreationTimestamp="2025-11-22 07:48:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-22 07:48:49.042500293 +0000 UTC m=+2266.151954306" watchObservedRunningTime="2025-11-22 07:48:49.049833095 +0000 UTC m=+2266.159287108" Nov 22 07:48:49 crc kubenswrapper[4929]: I1122 07:48:49.832867 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 07:48:49 crc kubenswrapper[4929]: I1122 07:48:49.992241 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.002146 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rd4z6\" (UniqueName: \"kubernetes.io/projected/ee6e3776-d7fa-44ad-a388-dabd60eece61-kube-api-access-rd4z6\") pod \"ee6e3776-d7fa-44ad-a388-dabd60eece61\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.002231 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee6e3776-d7fa-44ad-a388-dabd60eece61-scripts\") pod \"ee6e3776-d7fa-44ad-a388-dabd60eece61\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.002264 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee6e3776-d7fa-44ad-a388-dabd60eece61-logs\") pod \"ee6e3776-d7fa-44ad-a388-dabd60eece61\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.002286 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee6e3776-d7fa-44ad-a388-dabd60eece61-config-data\") pod \"ee6e3776-d7fa-44ad-a388-dabd60eece61\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.002512 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ee6e3776-d7fa-44ad-a388-dabd60eece61-httpd-run\") pod \"ee6e3776-d7fa-44ad-a388-dabd60eece61\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.002566 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee6e3776-d7fa-44ad-a388-dabd60eece61-combined-ca-bundle\") pod \"ee6e3776-d7fa-44ad-a388-dabd60eece61\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.002599 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ee6e3776-d7fa-44ad-a388-dabd60eece61\" (UID: \"ee6e3776-d7fa-44ad-a388-dabd60eece61\") " Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.005571 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee6e3776-d7fa-44ad-a388-dabd60eece61-logs" (OuterVolumeSpecName: "logs") pod "ee6e3776-d7fa-44ad-a388-dabd60eece61" (UID: "ee6e3776-d7fa-44ad-a388-dabd60eece61"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.016245 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "ee6e3776-d7fa-44ad-a388-dabd60eece61" (UID: "ee6e3776-d7fa-44ad-a388-dabd60eece61"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.017390 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee6e3776-d7fa-44ad-a388-dabd60eece61-scripts" (OuterVolumeSpecName: "scripts") pod "ee6e3776-d7fa-44ad-a388-dabd60eece61" (UID: "ee6e3776-d7fa-44ad-a388-dabd60eece61"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.017955 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee6e3776-d7fa-44ad-a388-dabd60eece61-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "ee6e3776-d7fa-44ad-a388-dabd60eece61" (UID: "ee6e3776-d7fa-44ad-a388-dabd60eece61"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.024525 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee6e3776-d7fa-44ad-a388-dabd60eece61-kube-api-access-rd4z6" (OuterVolumeSpecName: "kube-api-access-rd4z6") pod "ee6e3776-d7fa-44ad-a388-dabd60eece61" (UID: "ee6e3776-d7fa-44ad-a388-dabd60eece61"). InnerVolumeSpecName "kube-api-access-rd4z6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.027720 4929 generic.go:334] "Generic (PLEG): container finished" podID="6211e00a-48a8-43ff-a763-0a0fc09df943" containerID="e46e9ef7d90478c5695cddd6854190d150b640961648a25fe85c8e7fa5cd88bd" exitCode=0 Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.027752 4929 generic.go:334] "Generic (PLEG): container finished" podID="6211e00a-48a8-43ff-a763-0a0fc09df943" containerID="039b1d2f705f529960c8eeb7a6f5ff6a07428a21ebb2d84e54c52900426385ad" exitCode=143 Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.027816 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6211e00a-48a8-43ff-a763-0a0fc09df943","Type":"ContainerDied","Data":"e46e9ef7d90478c5695cddd6854190d150b640961648a25fe85c8e7fa5cd88bd"} Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.027848 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6211e00a-48a8-43ff-a763-0a0fc09df943","Type":"ContainerDied","Data":"039b1d2f705f529960c8eeb7a6f5ff6a07428a21ebb2d84e54c52900426385ad"} Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.027926 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6211e00a-48a8-43ff-a763-0a0fc09df943","Type":"ContainerDied","Data":"e7c452a481dffd6c16b9b929d020ef9c519c66674fb4e3a58e510d1439a76949"} Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.027947 4929 scope.go:117] "RemoveContainer" containerID="e46e9ef7d90478c5695cddd6854190d150b640961648a25fe85c8e7fa5cd88bd" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.028143 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.040381 4929 generic.go:334] "Generic (PLEG): container finished" podID="ee6e3776-d7fa-44ad-a388-dabd60eece61" containerID="a023cc31dc605a493da2b881a0b6ed4aa8c35206439e896c1ac816d8c66adbf6" exitCode=143 Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.040470 4929 generic.go:334] "Generic (PLEG): container finished" podID="ee6e3776-d7fa-44ad-a388-dabd60eece61" containerID="94239900a4338a088d4f1025afe63b67da619e9fd463b37b0221572777911743" exitCode=143 Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.041116 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4be58485-cd5a-49c4-8471-d1013862e52d" containerName="glance-log" containerID="cri-o://2da1966bea9be7e20c6536d6dfabcf7887a16d7a4729d35c014caccf5dd9f622" gracePeriod=30 Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.041398 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4be58485-cd5a-49c4-8471-d1013862e52d" containerName="glance-httpd" containerID="cri-o://7324f44ba140ac1ee7a731d97849c71cbb503e3755550a4f89dd705bc8585209" gracePeriod=30 Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.041484 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ee6e3776-d7fa-44ad-a388-dabd60eece61","Type":"ContainerDied","Data":"a023cc31dc605a493da2b881a0b6ed4aa8c35206439e896c1ac816d8c66adbf6"} Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.041542 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ee6e3776-d7fa-44ad-a388-dabd60eece61","Type":"ContainerDied","Data":"94239900a4338a088d4f1025afe63b67da619e9fd463b37b0221572777911743"} Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.041562 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"ee6e3776-d7fa-44ad-a388-dabd60eece61","Type":"ContainerDied","Data":"0570d8e5229cce06b1ab4feda27516a1ba9b491b09382d026314787be9fa04a8"} Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.045314 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.065599 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee6e3776-d7fa-44ad-a388-dabd60eece61-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ee6e3776-d7fa-44ad-a388-dabd60eece61" (UID: "ee6e3776-d7fa-44ad-a388-dabd60eece61"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.104363 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee6e3776-d7fa-44ad-a388-dabd60eece61-config-data" (OuterVolumeSpecName: "config-data") pod "ee6e3776-d7fa-44ad-a388-dabd60eece61" (UID: "ee6e3776-d7fa-44ad-a388-dabd60eece61"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.105042 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-combined-ca-bundle\") pod \"6211e00a-48a8-43ff-a763-0a0fc09df943\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.105118 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6mxqn\" (UniqueName: \"kubernetes.io/projected/6211e00a-48a8-43ff-a763-0a0fc09df943-kube-api-access-6mxqn\") pod \"6211e00a-48a8-43ff-a763-0a0fc09df943\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.105179 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-config-data-custom\") pod \"6211e00a-48a8-43ff-a763-0a0fc09df943\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.105246 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6211e00a-48a8-43ff-a763-0a0fc09df943-logs\") pod \"6211e00a-48a8-43ff-a763-0a0fc09df943\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.105288 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6211e00a-48a8-43ff-a763-0a0fc09df943-etc-machine-id\") pod \"6211e00a-48a8-43ff-a763-0a0fc09df943\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.105354 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-scripts\") pod \"6211e00a-48a8-43ff-a763-0a0fc09df943\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.105395 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-config-data\") pod \"6211e00a-48a8-43ff-a763-0a0fc09df943\" (UID: \"6211e00a-48a8-43ff-a763-0a0fc09df943\") " Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.105483 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6211e00a-48a8-43ff-a763-0a0fc09df943-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "6211e00a-48a8-43ff-a763-0a0fc09df943" (UID: "6211e00a-48a8-43ff-a763-0a0fc09df943"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.106685 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6211e00a-48a8-43ff-a763-0a0fc09df943-logs" (OuterVolumeSpecName: "logs") pod "6211e00a-48a8-43ff-a763-0a0fc09df943" (UID: "6211e00a-48a8-43ff-a763-0a0fc09df943"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.108228 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6211e00a-48a8-43ff-a763-0a0fc09df943-kube-api-access-6mxqn" (OuterVolumeSpecName: "kube-api-access-6mxqn") pod "6211e00a-48a8-43ff-a763-0a0fc09df943" (UID: "6211e00a-48a8-43ff-a763-0a0fc09df943"). InnerVolumeSpecName "kube-api-access-6mxqn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.110708 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "6211e00a-48a8-43ff-a763-0a0fc09df943" (UID: "6211e00a-48a8-43ff-a763-0a0fc09df943"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.112230 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee6e3776-d7fa-44ad-a388-dabd60eece61-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.112251 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ee6e3776-d7fa-44ad-a388-dabd60eece61-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.112259 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee6e3776-d7fa-44ad-a388-dabd60eece61-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.112268 4929 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ee6e3776-d7fa-44ad-a388-dabd60eece61-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.112277 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6mxqn\" (UniqueName: \"kubernetes.io/projected/6211e00a-48a8-43ff-a763-0a0fc09df943-kube-api-access-6mxqn\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.112286 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee6e3776-d7fa-44ad-a388-dabd60eece61-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.112332 4929 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.112341 4929 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.112349 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6211e00a-48a8-43ff-a763-0a0fc09df943-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.112359 4929 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6211e00a-48a8-43ff-a763-0a0fc09df943-etc-machine-id\") on node 
\"crc\" DevicePath \"\"" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.112508 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-scripts" (OuterVolumeSpecName: "scripts") pod "6211e00a-48a8-43ff-a763-0a0fc09df943" (UID: "6211e00a-48a8-43ff-a763-0a0fc09df943"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.112368 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rd4z6\" (UniqueName: \"kubernetes.io/projected/ee6e3776-d7fa-44ad-a388-dabd60eece61-kube-api-access-rd4z6\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.141407 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6211e00a-48a8-43ff-a763-0a0fc09df943" (UID: "6211e00a-48a8-43ff-a763-0a0fc09df943"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.149030 4929 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.166611 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-config-data" (OuterVolumeSpecName: "config-data") pod "6211e00a-48a8-43ff-a763-0a0fc09df943" (UID: "6211e00a-48a8-43ff-a763-0a0fc09df943"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.195156 4929 scope.go:117] "RemoveContainer" containerID="039b1d2f705f529960c8eeb7a6f5ff6a07428a21ebb2d84e54c52900426385ad" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.235555 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.235602 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.235617 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6211e00a-48a8-43ff-a763-0a0fc09df943-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.235632 4929 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.367454 4929 scope.go:117] "RemoveContainer" containerID="e46e9ef7d90478c5695cddd6854190d150b640961648a25fe85c8e7fa5cd88bd" Nov 22 07:48:50 crc kubenswrapper[4929]: E1122 07:48:50.367967 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e46e9ef7d90478c5695cddd6854190d150b640961648a25fe85c8e7fa5cd88bd\": container with ID starting with 
e46e9ef7d90478c5695cddd6854190d150b640961648a25fe85c8e7fa5cd88bd not found: ID does not exist" containerID="e46e9ef7d90478c5695cddd6854190d150b640961648a25fe85c8e7fa5cd88bd" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.368025 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e46e9ef7d90478c5695cddd6854190d150b640961648a25fe85c8e7fa5cd88bd"} err="failed to get container status \"e46e9ef7d90478c5695cddd6854190d150b640961648a25fe85c8e7fa5cd88bd\": rpc error: code = NotFound desc = could not find container \"e46e9ef7d90478c5695cddd6854190d150b640961648a25fe85c8e7fa5cd88bd\": container with ID starting with e46e9ef7d90478c5695cddd6854190d150b640961648a25fe85c8e7fa5cd88bd not found: ID does not exist" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.368058 4929 scope.go:117] "RemoveContainer" containerID="039b1d2f705f529960c8eeb7a6f5ff6a07428a21ebb2d84e54c52900426385ad" Nov 22 07:48:50 crc kubenswrapper[4929]: E1122 07:48:50.368389 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"039b1d2f705f529960c8eeb7a6f5ff6a07428a21ebb2d84e54c52900426385ad\": container with ID starting with 039b1d2f705f529960c8eeb7a6f5ff6a07428a21ebb2d84e54c52900426385ad not found: ID does not exist" containerID="039b1d2f705f529960c8eeb7a6f5ff6a07428a21ebb2d84e54c52900426385ad" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.368446 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"039b1d2f705f529960c8eeb7a6f5ff6a07428a21ebb2d84e54c52900426385ad"} err="failed to get container status \"039b1d2f705f529960c8eeb7a6f5ff6a07428a21ebb2d84e54c52900426385ad\": rpc error: code = NotFound desc = could not find container \"039b1d2f705f529960c8eeb7a6f5ff6a07428a21ebb2d84e54c52900426385ad\": container with ID starting with 039b1d2f705f529960c8eeb7a6f5ff6a07428a21ebb2d84e54c52900426385ad not found: ID does not exist" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.368491 4929 scope.go:117] "RemoveContainer" containerID="e46e9ef7d90478c5695cddd6854190d150b640961648a25fe85c8e7fa5cd88bd" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.368751 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e46e9ef7d90478c5695cddd6854190d150b640961648a25fe85c8e7fa5cd88bd"} err="failed to get container status \"e46e9ef7d90478c5695cddd6854190d150b640961648a25fe85c8e7fa5cd88bd\": rpc error: code = NotFound desc = could not find container \"e46e9ef7d90478c5695cddd6854190d150b640961648a25fe85c8e7fa5cd88bd\": container with ID starting with e46e9ef7d90478c5695cddd6854190d150b640961648a25fe85c8e7fa5cd88bd not found: ID does not exist" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.368777 4929 scope.go:117] "RemoveContainer" containerID="039b1d2f705f529960c8eeb7a6f5ff6a07428a21ebb2d84e54c52900426385ad" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.368956 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"039b1d2f705f529960c8eeb7a6f5ff6a07428a21ebb2d84e54c52900426385ad"} err="failed to get container status \"039b1d2f705f529960c8eeb7a6f5ff6a07428a21ebb2d84e54c52900426385ad\": rpc error: code = NotFound desc = could not find container \"039b1d2f705f529960c8eeb7a6f5ff6a07428a21ebb2d84e54c52900426385ad\": container with ID starting with 039b1d2f705f529960c8eeb7a6f5ff6a07428a21ebb2d84e54c52900426385ad not found: ID does not exist" Nov 22 
07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.368980 4929 scope.go:117] "RemoveContainer" containerID="a023cc31dc605a493da2b881a0b6ed4aa8c35206439e896c1ac816d8c66adbf6" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.374340 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.388366 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.396261 4929 scope.go:117] "RemoveContainer" containerID="94239900a4338a088d4f1025afe63b67da619e9fd463b37b0221572777911743" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.398601 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.411502 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.437759 4929 scope.go:117] "RemoveContainer" containerID="a023cc31dc605a493da2b881a0b6ed4aa8c35206439e896c1ac816d8c66adbf6" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.438144 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 22 07:48:50 crc kubenswrapper[4929]: E1122 07:48:50.438355 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a023cc31dc605a493da2b881a0b6ed4aa8c35206439e896c1ac816d8c66adbf6\": container with ID starting with a023cc31dc605a493da2b881a0b6ed4aa8c35206439e896c1ac816d8c66adbf6 not found: ID does not exist" containerID="a023cc31dc605a493da2b881a0b6ed4aa8c35206439e896c1ac816d8c66adbf6" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.438478 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a023cc31dc605a493da2b881a0b6ed4aa8c35206439e896c1ac816d8c66adbf6"} err="failed to get container status \"a023cc31dc605a493da2b881a0b6ed4aa8c35206439e896c1ac816d8c66adbf6\": rpc error: code = NotFound desc = could not find container \"a023cc31dc605a493da2b881a0b6ed4aa8c35206439e896c1ac816d8c66adbf6\": container with ID starting with a023cc31dc605a493da2b881a0b6ed4aa8c35206439e896c1ac816d8c66adbf6 not found: ID does not exist" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.438566 4929 scope.go:117] "RemoveContainer" containerID="94239900a4338a088d4f1025afe63b67da619e9fd463b37b0221572777911743" Nov 22 07:48:50 crc kubenswrapper[4929]: E1122 07:48:50.438673 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6211e00a-48a8-43ff-a763-0a0fc09df943" containerName="cinder-api" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.438697 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="6211e00a-48a8-43ff-a763-0a0fc09df943" containerName="cinder-api" Nov 22 07:48:50 crc kubenswrapper[4929]: E1122 07:48:50.438724 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6211e00a-48a8-43ff-a763-0a0fc09df943" containerName="cinder-api-log" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.438732 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="6211e00a-48a8-43ff-a763-0a0fc09df943" containerName="cinder-api-log" Nov 22 07:48:50 crc kubenswrapper[4929]: E1122 07:48:50.438744 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee6e3776-d7fa-44ad-a388-dabd60eece61" containerName="glance-httpd" Nov 22 07:48:50 crc 
kubenswrapper[4929]: I1122 07:48:50.438754 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee6e3776-d7fa-44ad-a388-dabd60eece61" containerName="glance-httpd" Nov 22 07:48:50 crc kubenswrapper[4929]: E1122 07:48:50.438780 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59b1c0b3-02ff-4ab0-9708-7e3ec7035790" containerName="init" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.438786 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="59b1c0b3-02ff-4ab0-9708-7e3ec7035790" containerName="init" Nov 22 07:48:50 crc kubenswrapper[4929]: E1122 07:48:50.438814 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee6e3776-d7fa-44ad-a388-dabd60eece61" containerName="glance-log" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.438821 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee6e3776-d7fa-44ad-a388-dabd60eece61" containerName="glance-log" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.439041 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee6e3776-d7fa-44ad-a388-dabd60eece61" containerName="glance-httpd" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.439056 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="59b1c0b3-02ff-4ab0-9708-7e3ec7035790" containerName="init" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.439069 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee6e3776-d7fa-44ad-a388-dabd60eece61" containerName="glance-log" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.439088 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="6211e00a-48a8-43ff-a763-0a0fc09df943" containerName="cinder-api" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.439108 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="6211e00a-48a8-43ff-a763-0a0fc09df943" containerName="cinder-api-log" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.443142 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.445836 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.446074 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 22 07:48:50 crc kubenswrapper[4929]: E1122 07:48:50.449297 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94239900a4338a088d4f1025afe63b67da619e9fd463b37b0221572777911743\": container with ID starting with 94239900a4338a088d4f1025afe63b67da619e9fd463b37b0221572777911743 not found: ID does not exist" containerID="94239900a4338a088d4f1025afe63b67da619e9fd463b37b0221572777911743" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.449537 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94239900a4338a088d4f1025afe63b67da619e9fd463b37b0221572777911743"} err="failed to get container status \"94239900a4338a088d4f1025afe63b67da619e9fd463b37b0221572777911743\": rpc error: code = NotFound desc = could not find container \"94239900a4338a088d4f1025afe63b67da619e9fd463b37b0221572777911743\": container with ID starting with 94239900a4338a088d4f1025afe63b67da619e9fd463b37b0221572777911743 not found: ID does not exist" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.449660 4929 scope.go:117] "RemoveContainer" containerID="a023cc31dc605a493da2b881a0b6ed4aa8c35206439e896c1ac816d8c66adbf6" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.450721 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.450779 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.455117 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a023cc31dc605a493da2b881a0b6ed4aa8c35206439e896c1ac816d8c66adbf6"} err="failed to get container status \"a023cc31dc605a493da2b881a0b6ed4aa8c35206439e896c1ac816d8c66adbf6\": rpc error: code = NotFound desc = could not find container \"a023cc31dc605a493da2b881a0b6ed4aa8c35206439e896c1ac816d8c66adbf6\": container with ID starting with a023cc31dc605a493da2b881a0b6ed4aa8c35206439e896c1ac816d8c66adbf6 not found: ID does not exist" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.455418 4929 scope.go:117] "RemoveContainer" containerID="94239900a4338a088d4f1025afe63b67da619e9fd463b37b0221572777911743" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.456290 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94239900a4338a088d4f1025afe63b67da619e9fd463b37b0221572777911743"} err="failed to get container status \"94239900a4338a088d4f1025afe63b67da619e9fd463b37b0221572777911743\": rpc error: code = NotFound desc = could not find container \"94239900a4338a088d4f1025afe63b67da619e9fd463b37b0221572777911743\": container with ID starting with 94239900a4338a088d4f1025afe63b67da619e9fd463b37b0221572777911743 not found: ID does not exist" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.460506 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 
07:48:50.462561 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.465940 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.476562 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.487546 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.499874 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.541013 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-logs\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.541079 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdhfj\" (UniqueName: \"kubernetes.io/projected/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-kube-api-access-mdhfj\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.541139 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-public-tls-certs\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.541163 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.541202 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-scripts\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.541258 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-config-data-custom\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.541330 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.541606 4929 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-config-data\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.541713 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.644655 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-public-tls-certs\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.644754 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.644795 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-scripts\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.644872 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-scripts\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.644910 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.644904 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.644960 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-config-data-custom\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.645021 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7e821d91-79d4-4a36-8f13-2d2f591a3f62-httpd-run\") pod \"glance-default-internal-api-0\" (UID: 
\"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.645133 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.645174 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.645262 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrjr2\" (UniqueName: \"kubernetes.io/projected/7e821d91-79d4-4a36-8f13-2d2f591a3f62-kube-api-access-nrjr2\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.645299 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-config-data\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.645331 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.645409 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-config-data\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.645441 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e821d91-79d4-4a36-8f13-2d2f591a3f62-logs\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.645477 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.645514 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-logs\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc 
kubenswrapper[4929]: I1122 07:48:50.645557 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdhfj\" (UniqueName: \"kubernetes.io/projected/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-kube-api-access-mdhfj\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.648063 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-logs\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.650993 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-scripts\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.651070 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-config-data-custom\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.652238 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-public-tls-certs\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.656613 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.656991 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-config-data\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.663054 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.666433 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdhfj\" (UniqueName: \"kubernetes.io/projected/c0f7a9d8-548a-4490-9fd9-8c431e6ca06a-kube-api-access-mdhfj\") pod \"cinder-api-0\" (UID: \"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a\") " pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.747061 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-scripts\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 
07:48:50.747118 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.747162 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7e821d91-79d4-4a36-8f13-2d2f591a3f62-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.747609 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7e821d91-79d4-4a36-8f13-2d2f591a3f62-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.747677 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.747778 4929 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.750164 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrjr2\" (UniqueName: \"kubernetes.io/projected/7e821d91-79d4-4a36-8f13-2d2f591a3f62-kube-api-access-nrjr2\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.750272 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-config-data\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.750295 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e821d91-79d4-4a36-8f13-2d2f591a3f62-logs\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.750330 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.752186 4929 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-scripts\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.752441 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e821d91-79d4-4a36-8f13-2d2f591a3f62-logs\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.753993 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.759141 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-config-data\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.771721 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.775074 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrjr2\" (UniqueName: \"kubernetes.io/projected/7e821d91-79d4-4a36-8f13-2d2f591a3f62-kube-api-access-nrjr2\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.790133 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.790722 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.803533 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 07:48:50 crc kubenswrapper[4929]: I1122 07:48:50.935469 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.019540 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.021644 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="40fd17d3-85aa-4fa4-b933-7c10e2ca41b3" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.193:8080/\": dial tcp 10.217.0.193:8080: connect: connection refused" Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.150326 4929 generic.go:334] "Generic (PLEG): container finished" podID="4be58485-cd5a-49c4-8471-d1013862e52d" containerID="7324f44ba140ac1ee7a731d97849c71cbb503e3755550a4f89dd705bc8585209" exitCode=0 Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.150700 4929 generic.go:334] "Generic (PLEG): container finished" podID="4be58485-cd5a-49c4-8471-d1013862e52d" containerID="2da1966bea9be7e20c6536d6dfabcf7887a16d7a4729d35c014caccf5dd9f622" exitCode=143 Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.151747 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4be58485-cd5a-49c4-8471-d1013862e52d","Type":"ContainerDied","Data":"7324f44ba140ac1ee7a731d97849c71cbb503e3755550a4f89dd705bc8585209"} Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.151781 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4be58485-cd5a-49c4-8471-d1013862e52d","Type":"ContainerDied","Data":"2da1966bea9be7e20c6536d6dfabcf7887a16d7a4729d35c014caccf5dd9f622"} Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.748972 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.902140 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.906905 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4be58485-cd5a-49c4-8471-d1013862e52d-scripts\") pod \"4be58485-cd5a-49c4-8471-d1013862e52d\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.906970 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4be58485-cd5a-49c4-8471-d1013862e52d-config-data\") pod \"4be58485-cd5a-49c4-8471-d1013862e52d\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.907066 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjrh2\" (UniqueName: \"kubernetes.io/projected/4be58485-cd5a-49c4-8471-d1013862e52d-kube-api-access-zjrh2\") pod \"4be58485-cd5a-49c4-8471-d1013862e52d\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.907119 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"4be58485-cd5a-49c4-8471-d1013862e52d\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.907179 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4be58485-cd5a-49c4-8471-d1013862e52d-httpd-run\") pod \"4be58485-cd5a-49c4-8471-d1013862e52d\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.907276 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4be58485-cd5a-49c4-8471-d1013862e52d-combined-ca-bundle\") pod \"4be58485-cd5a-49c4-8471-d1013862e52d\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.907362 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4be58485-cd5a-49c4-8471-d1013862e52d-logs\") pod \"4be58485-cd5a-49c4-8471-d1013862e52d\" (UID: \"4be58485-cd5a-49c4-8471-d1013862e52d\") " Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.908300 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4be58485-cd5a-49c4-8471-d1013862e52d-logs" (OuterVolumeSpecName: "logs") pod "4be58485-cd5a-49c4-8471-d1013862e52d" (UID: "4be58485-cd5a-49c4-8471-d1013862e52d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.914418 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4be58485-cd5a-49c4-8471-d1013862e52d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "4be58485-cd5a-49c4-8471-d1013862e52d" (UID: "4be58485-cd5a-49c4-8471-d1013862e52d"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.917312 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "4be58485-cd5a-49c4-8471-d1013862e52d" (UID: "4be58485-cd5a-49c4-8471-d1013862e52d"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.919534 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4be58485-cd5a-49c4-8471-d1013862e52d-scripts" (OuterVolumeSpecName: "scripts") pod "4be58485-cd5a-49c4-8471-d1013862e52d" (UID: "4be58485-cd5a-49c4-8471-d1013862e52d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.925471 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4be58485-cd5a-49c4-8471-d1013862e52d-kube-api-access-zjrh2" (OuterVolumeSpecName: "kube-api-access-zjrh2") pod "4be58485-cd5a-49c4-8471-d1013862e52d" (UID: "4be58485-cd5a-49c4-8471-d1013862e52d"). InnerVolumeSpecName "kube-api-access-zjrh2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.988574 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6211e00a-48a8-43ff-a763-0a0fc09df943" path="/var/lib/kubelet/pods/6211e00a-48a8-43ff-a763-0a0fc09df943/volumes" Nov 22 07:48:51 crc kubenswrapper[4929]: I1122 07:48:51.990531 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee6e3776-d7fa-44ad-a388-dabd60eece61" path="/var/lib/kubelet/pods/ee6e3776-d7fa-44ad-a388-dabd60eece61/volumes" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.010393 4929 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4be58485-cd5a-49c4-8471-d1013862e52d-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.010425 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4be58485-cd5a-49c4-8471-d1013862e52d-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.010434 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4be58485-cd5a-49c4-8471-d1013862e52d-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.010441 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjrh2\" (UniqueName: \"kubernetes.io/projected/4be58485-cd5a-49c4-8471-d1013862e52d-kube-api-access-zjrh2\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.010467 4929 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.048814 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4be58485-cd5a-49c4-8471-d1013862e52d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4be58485-cd5a-49c4-8471-d1013862e52d" (UID: "4be58485-cd5a-49c4-8471-d1013862e52d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.056529 4929 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.116697 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.126666 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4be58485-cd5a-49c4-8471-d1013862e52d-config-data" (OuterVolumeSpecName: "config-data") pod "4be58485-cd5a-49c4-8471-d1013862e52d" (UID: "4be58485-cd5a-49c4-8471-d1013862e52d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.141580 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4be58485-cd5a-49c4-8471-d1013862e52d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.147488 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4be58485-cd5a-49c4-8471-d1013862e52d-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.147613 4929 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.202431 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4be58485-cd5a-49c4-8471-d1013862e52d","Type":"ContainerDied","Data":"b35a01523b0a61c2d10ff927db42146d35538f8263d58e37888f32f02797e187"} Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.202500 4929 scope.go:117] "RemoveContainer" containerID="7324f44ba140ac1ee7a731d97849c71cbb503e3755550a4f89dd705bc8585209" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.202767 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.228296 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a","Type":"ContainerStarted","Data":"80e21a4dd55de3ea7efc03a3265b2232a227ec1e76f2ca43d760c98c68d228ff"} Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.281799 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.284225 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c4e20beb-f178-437e-a8db-2138531b8aab","Type":"ContainerStarted","Data":"03e4f7ef8d14d79d92a0779577e613d262ca3b2877ca45233a0e6ddc527f6398"} Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.284468 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c4e20beb-f178-437e-a8db-2138531b8aab" containerName="ceilometer-central-agent" containerID="cri-o://79b8401c0bd5f3d539c26d03616cb8e0b806ddef9b434740abfec2b172910800" gracePeriod=30 Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.284575 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.284717 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c4e20beb-f178-437e-a8db-2138531b8aab" containerName="proxy-httpd" containerID="cri-o://03e4f7ef8d14d79d92a0779577e613d262ca3b2877ca45233a0e6ddc527f6398" gracePeriod=30 Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.284924 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c4e20beb-f178-437e-a8db-2138531b8aab" containerName="ceilometer-notification-agent" containerID="cri-o://0da3600328d3a040b87cb189c16aaf8379a7bc8d0ee3e69106490d915c8f6f42" gracePeriod=30 Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.284992 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c4e20beb-f178-437e-a8db-2138531b8aab" containerName="sg-core" containerID="cri-o://8b16fefa582fe2cecb48c3f39af4100cc6e39b10d7b91acffc272e1552e51282" gracePeriod=30 Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.298567 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.309606 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7e821d91-79d4-4a36-8f13-2d2f591a3f62","Type":"ContainerStarted","Data":"a113c02fd5e128fdbc9da6dc8973ba7d1a6ecab47b49fab4649e06322d0fbdb6"} Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.315361 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 07:48:52 crc kubenswrapper[4929]: E1122 07:48:52.315946 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4be58485-cd5a-49c4-8471-d1013862e52d" containerName="glance-log" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.315965 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="4be58485-cd5a-49c4-8471-d1013862e52d" containerName="glance-log" Nov 22 07:48:52 crc kubenswrapper[4929]: E1122 07:48:52.315998 4929 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="4be58485-cd5a-49c4-8471-d1013862e52d" containerName="glance-httpd" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.316006 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="4be58485-cd5a-49c4-8471-d1013862e52d" containerName="glance-httpd" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.316256 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="4be58485-cd5a-49c4-8471-d1013862e52d" containerName="glance-log" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.316283 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="4be58485-cd5a-49c4-8471-d1013862e52d" containerName="glance-httpd" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.317726 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.322426 4929 scope.go:117] "RemoveContainer" containerID="2da1966bea9be7e20c6536d6dfabcf7887a16d7a4729d35c014caccf5dd9f622" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.322865 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.326038 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.358392 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.414613 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.8433880499999997 podStartE2EDuration="9.41459109s" podCreationTimestamp="2025-11-22 07:48:43 +0000 UTC" firstStartedPulling="2025-11-22 07:48:45.26529773 +0000 UTC m=+2262.374751743" lastFinishedPulling="2025-11-22 07:48:50.83650077 +0000 UTC m=+2267.945954783" observedRunningTime="2025-11-22 07:48:52.328155445 +0000 UTC m=+2269.437609468" watchObservedRunningTime="2025-11-22 07:48:52.41459109 +0000 UTC m=+2269.524045103" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.456002 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.457466 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-scripts\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.457833 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.457913 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.457960 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpkvz\" (UniqueName: \"kubernetes.io/projected/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-kube-api-access-lpkvz\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.458050 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-config-data\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.458128 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.458162 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-logs\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.561868 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.561993 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.562037 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpkvz\" (UniqueName: \"kubernetes.io/projected/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-kube-api-access-lpkvz\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.562100 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-config-data\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.562157 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.562190 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-logs\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.562261 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.562296 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-scripts\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.567126 4929 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.568549 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-logs\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.568903 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.570079 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.571342 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-scripts\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.581535 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-config-data\") pod \"glance-default-external-api-0\" (UID: 
\"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.584943 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.615082 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpkvz\" (UniqueName: \"kubernetes.io/projected/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-kube-api-access-lpkvz\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.638492 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " pod="openstack/glance-default-external-api-0" Nov 22 07:48:52 crc kubenswrapper[4929]: I1122 07:48:52.719751 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 07:48:53 crc kubenswrapper[4929]: I1122 07:48:53.350263 4929 generic.go:334] "Generic (PLEG): container finished" podID="c4e20beb-f178-437e-a8db-2138531b8aab" containerID="03e4f7ef8d14d79d92a0779577e613d262ca3b2877ca45233a0e6ddc527f6398" exitCode=0 Nov 22 07:48:53 crc kubenswrapper[4929]: I1122 07:48:53.350733 4929 generic.go:334] "Generic (PLEG): container finished" podID="c4e20beb-f178-437e-a8db-2138531b8aab" containerID="8b16fefa582fe2cecb48c3f39af4100cc6e39b10d7b91acffc272e1552e51282" exitCode=2 Nov 22 07:48:53 crc kubenswrapper[4929]: I1122 07:48:53.350303 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c4e20beb-f178-437e-a8db-2138531b8aab","Type":"ContainerDied","Data":"03e4f7ef8d14d79d92a0779577e613d262ca3b2877ca45233a0e6ddc527f6398"} Nov 22 07:48:53 crc kubenswrapper[4929]: I1122 07:48:53.350770 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c4e20beb-f178-437e-a8db-2138531b8aab","Type":"ContainerDied","Data":"8b16fefa582fe2cecb48c3f39af4100cc6e39b10d7b91acffc272e1552e51282"} Nov 22 07:48:53 crc kubenswrapper[4929]: I1122 07:48:53.350781 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c4e20beb-f178-437e-a8db-2138531b8aab","Type":"ContainerDied","Data":"0da3600328d3a040b87cb189c16aaf8379a7bc8d0ee3e69106490d915c8f6f42"} Nov 22 07:48:53 crc kubenswrapper[4929]: I1122 07:48:53.350742 4929 generic.go:334] "Generic (PLEG): container finished" podID="c4e20beb-f178-437e-a8db-2138531b8aab" containerID="0da3600328d3a040b87cb189c16aaf8379a7bc8d0ee3e69106490d915c8f6f42" exitCode=0 Nov 22 07:48:53 crc kubenswrapper[4929]: I1122 07:48:53.371843 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a","Type":"ContainerStarted","Data":"8d75c77e438ca2c609ae6c11a9e3d77d9f17fc7fb7f1a34b331503cf40c484cc"} Nov 22 07:48:53 crc kubenswrapper[4929]: I1122 07:48:53.485676 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 07:48:53 
crc kubenswrapper[4929]: I1122 07:48:53.518526 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-795f4db4bc-jr28r" Nov 22 07:48:53 crc kubenswrapper[4929]: I1122 07:48:53.588735 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-c72g6"] Nov 22 07:48:53 crc kubenswrapper[4929]: I1122 07:48:53.589123 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" podUID="4b60b952-faed-4624-a268-ec9a01c4271a" containerName="dnsmasq-dns" containerID="cri-o://26060ba06b38077d09eebc10477fa54dfe2f7470d78e65bfe8cf247a0fae7d90" gracePeriod=10 Nov 22 07:48:53 crc kubenswrapper[4929]: I1122 07:48:53.922117 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:53 crc kubenswrapper[4929]: I1122 07:48:53.966575 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4be58485-cd5a-49c4-8471-d1013862e52d" path="/var/lib/kubelet/pods/4be58485-cd5a-49c4-8471-d1013862e52d/volumes" Nov 22 07:48:54 crc kubenswrapper[4929]: I1122 07:48:54.417137 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bc78f9a9-1829-44d9-b141-d9ddc9f63da7","Type":"ContainerStarted","Data":"515918d68c5b4cdcdf6d2e31f09d1afb3b97ee7db0b01b071de852220a51d302"} Nov 22 07:48:54 crc kubenswrapper[4929]: I1122 07:48:54.433072 4929 generic.go:334] "Generic (PLEG): container finished" podID="4b60b952-faed-4624-a268-ec9a01c4271a" containerID="26060ba06b38077d09eebc10477fa54dfe2f7470d78e65bfe8cf247a0fae7d90" exitCode=0 Nov 22 07:48:54 crc kubenswrapper[4929]: I1122 07:48:54.433133 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" event={"ID":"4b60b952-faed-4624-a268-ec9a01c4271a","Type":"ContainerDied","Data":"26060ba06b38077d09eebc10477fa54dfe2f7470d78e65bfe8cf247a0fae7d90"} Nov 22 07:48:54 crc kubenswrapper[4929]: I1122 07:48:54.464744 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7e821d91-79d4-4a36-8f13-2d2f591a3f62","Type":"ContainerStarted","Data":"3c0a40c583532b24c72a8476490339f89ca8e2d8c95fee096bc9399561142705"} Nov 22 07:48:54 crc kubenswrapper[4929]: I1122 07:48:54.704705 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7486bd8978-4r6hg" Nov 22 07:48:54 crc kubenswrapper[4929]: I1122 07:48:54.801972 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-68cf69f54-cvmvj"] Nov 22 07:48:54 crc kubenswrapper[4929]: I1122 07:48:54.802193 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-68cf69f54-cvmvj" podUID="494b6c5c-2251-443b-bd47-fa903a139c47" containerName="barbican-api-log" containerID="cri-o://4fee07e82750c13c657fa3ec87573da70fdde66c21767b4723fde145210d80f7" gracePeriod=30 Nov 22 07:48:54 crc kubenswrapper[4929]: I1122 07:48:54.804443 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-68cf69f54-cvmvj" podUID="494b6c5c-2251-443b-bd47-fa903a139c47" containerName="barbican-api" containerID="cri-o://2620af1a3ffa6ae9353c4a43eb0f1a0fbd7de998beb3bacffc68a0dbeb67c02a" gracePeriod=30 Nov 22 07:48:54 crc kubenswrapper[4929]: I1122 07:48:54.821127 4929 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack/barbican-api-68cf69f54-cvmvj" podUID="494b6c5c-2251-443b-bd47-fa903a139c47" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.191:9311/healthcheck\": EOF" Nov 22 07:48:54 crc kubenswrapper[4929]: I1122 07:48:54.821450 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-68cf69f54-cvmvj" podUID="494b6c5c-2251-443b-bd47-fa903a139c47" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.191:9311/healthcheck\": EOF" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.019817 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.153040 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-config\") pod \"4b60b952-faed-4624-a268-ec9a01c4271a\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.153156 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-dns-swift-storage-0\") pod \"4b60b952-faed-4624-a268-ec9a01c4271a\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.153283 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9rr75\" (UniqueName: \"kubernetes.io/projected/4b60b952-faed-4624-a268-ec9a01c4271a-kube-api-access-9rr75\") pod \"4b60b952-faed-4624-a268-ec9a01c4271a\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.153310 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-dns-svc\") pod \"4b60b952-faed-4624-a268-ec9a01c4271a\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.153331 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-ovsdbserver-sb\") pod \"4b60b952-faed-4624-a268-ec9a01c4271a\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.153564 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-ovsdbserver-nb\") pod \"4b60b952-faed-4624-a268-ec9a01c4271a\" (UID: \"4b60b952-faed-4624-a268-ec9a01c4271a\") " Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.160919 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b60b952-faed-4624-a268-ec9a01c4271a-kube-api-access-9rr75" (OuterVolumeSpecName: "kube-api-access-9rr75") pod "4b60b952-faed-4624-a268-ec9a01c4271a" (UID: "4b60b952-faed-4624-a268-ec9a01c4271a"). InnerVolumeSpecName "kube-api-access-9rr75". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.226162 4929 scope.go:117] "RemoveContainer" containerID="e9733877fa4257bd13e7a09e1ea4585937dcd795e20c87d558346307cd476837" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.243449 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4b60b952-faed-4624-a268-ec9a01c4271a" (UID: "4b60b952-faed-4624-a268-ec9a01c4271a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.254712 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4b60b952-faed-4624-a268-ec9a01c4271a" (UID: "4b60b952-faed-4624-a268-ec9a01c4271a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.256185 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.256323 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9rr75\" (UniqueName: \"kubernetes.io/projected/4b60b952-faed-4624-a268-ec9a01c4271a-kube-api-access-9rr75\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.256338 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.256962 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4b60b952-faed-4624-a268-ec9a01c4271a" (UID: "4b60b952-faed-4624-a268-ec9a01c4271a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.271311 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "4b60b952-faed-4624-a268-ec9a01c4271a" (UID: "4b60b952-faed-4624-a268-ec9a01c4271a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.289654 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-config" (OuterVolumeSpecName: "config") pod "4b60b952-faed-4624-a268-ec9a01c4271a" (UID: "4b60b952-faed-4624-a268-ec9a01c4271a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.360202 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.360260 4929 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.360274 4929 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4b60b952-faed-4624-a268-ec9a01c4271a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.463068 4929 scope.go:117] "RemoveContainer" containerID="d53ca4c874c22adecb43c54da8fbd50500cf34316a69df4871a1e68d39498a6a" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.479577 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bc78f9a9-1829-44d9-b141-d9ddc9f63da7","Type":"ContainerStarted","Data":"a8e4db0dcc75a4fd567e3ee46593f2b974b3894cbdeaf87a81a87551b0631c2c"} Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.481645 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c0f7a9d8-548a-4490-9fd9-8c431e6ca06a","Type":"ContainerStarted","Data":"24b7b660acb26231ce8ad0942b55cf240104107431d13635db7fa72abe210c42"} Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.481921 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.484074 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" event={"ID":"4b60b952-faed-4624-a268-ec9a01c4271a","Type":"ContainerDied","Data":"d581e57b0890f59e85a0fa869b7ef988ca25fe313e4fd2daa7e93f2c351c7c71"} Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.484119 4929 scope.go:117] "RemoveContainer" containerID="26060ba06b38077d09eebc10477fa54dfe2f7470d78e65bfe8cf247a0fae7d90" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.484295 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-58dd9ff6bc-c72g6" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.497706 4929 generic.go:334] "Generic (PLEG): container finished" podID="494b6c5c-2251-443b-bd47-fa903a139c47" containerID="4fee07e82750c13c657fa3ec87573da70fdde66c21767b4723fde145210d80f7" exitCode=143 Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.497794 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68cf69f54-cvmvj" event={"ID":"494b6c5c-2251-443b-bd47-fa903a139c47","Type":"ContainerDied","Data":"4fee07e82750c13c657fa3ec87573da70fdde66c21767b4723fde145210d80f7"} Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.501631 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7e821d91-79d4-4a36-8f13-2d2f591a3f62","Type":"ContainerStarted","Data":"522ec04975e0cb6924ba8ce234fb987d75495d028d28f1ab472626dc3377bdb5"} Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.530260 4929 scope.go:117] "RemoveContainer" containerID="26060ba06b38077d09eebc10477fa54dfe2f7470d78e65bfe8cf247a0fae7d90" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.535188 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.535161085 podStartE2EDuration="5.535161085s" podCreationTimestamp="2025-11-22 07:48:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:48:55.513964329 +0000 UTC m=+2272.623418342" watchObservedRunningTime="2025-11-22 07:48:55.535161085 +0000 UTC m=+2272.644615098" Nov 22 07:48:55 crc kubenswrapper[4929]: E1122 07:48:55.554987 4929 log.go:32] "RemoveContainer from runtime service failed" err="rpc error: code = Unknown desc = failed to delete container k8s_dnsmasq-dns_dnsmasq-dns-58dd9ff6bc-c72g6_openstack_4b60b952-faed-4624-a268-ec9a01c4271a_0 in pod sandbox d581e57b0890f59e85a0fa869b7ef988ca25fe313e4fd2daa7e93f2c351c7c71 from index: no such id: '26060ba06b38077d09eebc10477fa54dfe2f7470d78e65bfe8cf247a0fae7d90'" containerID="26060ba06b38077d09eebc10477fa54dfe2f7470d78e65bfe8cf247a0fae7d90" Nov 22 07:48:55 crc kubenswrapper[4929]: E1122 07:48:55.555545 4929 kuberuntime_gc.go:150] "Failed to remove container" err="rpc error: code = Unknown desc = failed to delete container k8s_dnsmasq-dns_dnsmasq-dns-58dd9ff6bc-c72g6_openstack_4b60b952-faed-4624-a268-ec9a01c4271a_0 in pod sandbox d581e57b0890f59e85a0fa869b7ef988ca25fe313e4fd2daa7e93f2c351c7c71 from index: no such id: '26060ba06b38077d09eebc10477fa54dfe2f7470d78e65bfe8cf247a0fae7d90'" containerID="26060ba06b38077d09eebc10477fa54dfe2f7470d78e65bfe8cf247a0fae7d90" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.555376 4929 scope.go:117] "RemoveContainer" containerID="d53ca4c874c22adecb43c54da8fbd50500cf34316a69df4871a1e68d39498a6a" Nov 22 07:48:55 crc kubenswrapper[4929]: E1122 07:48:55.562010 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d53ca4c874c22adecb43c54da8fbd50500cf34316a69df4871a1e68d39498a6a\": container with ID starting with d53ca4c874c22adecb43c54da8fbd50500cf34316a69df4871a1e68d39498a6a not found: ID does not exist" containerID="d53ca4c874c22adecb43c54da8fbd50500cf34316a69df4871a1e68d39498a6a" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.562056 4929 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"d53ca4c874c22adecb43c54da8fbd50500cf34316a69df4871a1e68d39498a6a"} err="failed to get container status \"d53ca4c874c22adecb43c54da8fbd50500cf34316a69df4871a1e68d39498a6a\": rpc error: code = NotFound desc = could not find container \"d53ca4c874c22adecb43c54da8fbd50500cf34316a69df4871a1e68d39498a6a\": container with ID starting with d53ca4c874c22adecb43c54da8fbd50500cf34316a69df4871a1e68d39498a6a not found: ID does not exist" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.563097 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.563069018 podStartE2EDuration="5.563069018s" podCreationTimestamp="2025-11-22 07:48:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:48:55.552394343 +0000 UTC m=+2272.661848366" watchObservedRunningTime="2025-11-22 07:48:55.563069018 +0000 UTC m=+2272.672523041" Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.605265 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-c72g6"] Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.623715 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-58dd9ff6bc-c72g6"] Nov 22 07:48:55 crc kubenswrapper[4929]: I1122 07:48:55.962048 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b60b952-faed-4624-a268-ec9a01c4271a" path="/var/lib/kubelet/pods/4b60b952-faed-4624-a268-ec9a01c4271a/volumes" Nov 22 07:48:56 crc kubenswrapper[4929]: I1122 07:48:56.442934 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 22 07:48:56 crc kubenswrapper[4929]: I1122 07:48:56.491755 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 07:48:56 crc kubenswrapper[4929]: I1122 07:48:56.524959 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bc78f9a9-1829-44d9-b141-d9ddc9f63da7","Type":"ContainerStarted","Data":"e6ea2e1b597b0203e52db2fa7190f33a09abba40a11757dfdb54d05cd4a5a638"} Nov 22 07:48:56 crc kubenswrapper[4929]: I1122 07:48:56.525126 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="40fd17d3-85aa-4fa4-b933-7c10e2ca41b3" containerName="cinder-scheduler" containerID="cri-o://041248002a0f79e917887cea403ae43aa0320cf65136be65cc42d7964baa02ef" gracePeriod=30 Nov 22 07:48:56 crc kubenswrapper[4929]: I1122 07:48:56.525186 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="40fd17d3-85aa-4fa4-b933-7c10e2ca41b3" containerName="probe" containerID="cri-o://dfe2f533eb5949d42cb46c622d8b68c90feee37ec487852c01b681bb3be739f8" gracePeriod=30 Nov 22 07:48:56 crc kubenswrapper[4929]: I1122 07:48:56.549573 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.549558223 podStartE2EDuration="4.549558223s" podCreationTimestamp="2025-11-22 07:48:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:48:56.546966238 +0000 UTC m=+2273.656420251" watchObservedRunningTime="2025-11-22 07:48:56.549558223 +0000 UTC m=+2273.659012236" Nov 22 07:48:57 crc 
kubenswrapper[4929]: I1122 07:48:57.507957 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 07:48:57 crc kubenswrapper[4929]: I1122 07:48:57.536682 4929 generic.go:334] "Generic (PLEG): container finished" podID="40fd17d3-85aa-4fa4-b933-7c10e2ca41b3" containerID="dfe2f533eb5949d42cb46c622d8b68c90feee37ec487852c01b681bb3be739f8" exitCode=0 Nov 22 07:48:57 crc kubenswrapper[4929]: I1122 07:48:57.536734 4929 generic.go:334] "Generic (PLEG): container finished" podID="40fd17d3-85aa-4fa4-b933-7c10e2ca41b3" containerID="041248002a0f79e917887cea403ae43aa0320cf65136be65cc42d7964baa02ef" exitCode=0 Nov 22 07:48:57 crc kubenswrapper[4929]: I1122 07:48:57.536774 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3","Type":"ContainerDied","Data":"dfe2f533eb5949d42cb46c622d8b68c90feee37ec487852c01b681bb3be739f8"} Nov 22 07:48:57 crc kubenswrapper[4929]: I1122 07:48:57.536838 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3","Type":"ContainerDied","Data":"041248002a0f79e917887cea403ae43aa0320cf65136be65cc42d7964baa02ef"} Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.383049 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.427445 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-config-data-custom\") pod \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.427513 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-etc-machine-id\") pod \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.427670 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-scripts\") pod \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.427676 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "40fd17d3-85aa-4fa4-b933-7c10e2ca41b3" (UID: "40fd17d3-85aa-4fa4-b933-7c10e2ca41b3"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.427727 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-combined-ca-bundle\") pod \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.427809 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-config-data\") pod \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.427870 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vbfm2\" (UniqueName: \"kubernetes.io/projected/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-kube-api-access-vbfm2\") pod \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\" (UID: \"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3\") " Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.428483 4929 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.433558 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-kube-api-access-vbfm2" (OuterVolumeSpecName: "kube-api-access-vbfm2") pod "40fd17d3-85aa-4fa4-b933-7c10e2ca41b3" (UID: "40fd17d3-85aa-4fa4-b933-7c10e2ca41b3"). InnerVolumeSpecName "kube-api-access-vbfm2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.434820 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-scripts" (OuterVolumeSpecName: "scripts") pod "40fd17d3-85aa-4fa4-b933-7c10e2ca41b3" (UID: "40fd17d3-85aa-4fa4-b933-7c10e2ca41b3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.439401 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "40fd17d3-85aa-4fa4-b933-7c10e2ca41b3" (UID: "40fd17d3-85aa-4fa4-b933-7c10e2ca41b3"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.504542 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "40fd17d3-85aa-4fa4-b933-7c10e2ca41b3" (UID: "40fd17d3-85aa-4fa4-b933-7c10e2ca41b3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.530631 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.530699 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.530712 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vbfm2\" (UniqueName: \"kubernetes.io/projected/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-kube-api-access-vbfm2\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.530721 4929 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.546905 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-config-data" (OuterVolumeSpecName: "config-data") pod "40fd17d3-85aa-4fa4-b933-7c10e2ca41b3" (UID: "40fd17d3-85aa-4fa4-b933-7c10e2ca41b3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.548399 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.548470 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"40fd17d3-85aa-4fa4-b933-7c10e2ca41b3","Type":"ContainerDied","Data":"712ae19f01377c7ab8dfba4ed3bf0ba7ec9eea2cb3d447f9925a60ca91ba27b4"} Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.548511 4929 scope.go:117] "RemoveContainer" containerID="dfe2f533eb5949d42cb46c622d8b68c90feee37ec487852c01b681bb3be739f8" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.548508 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="bc78f9a9-1829-44d9-b141-d9ddc9f63da7" containerName="glance-log" containerID="cri-o://a8e4db0dcc75a4fd567e3ee46593f2b974b3894cbdeaf87a81a87551b0631c2c" gracePeriod=30 Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.548838 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="bc78f9a9-1829-44d9-b141-d9ddc9f63da7" containerName="glance-httpd" containerID="cri-o://e6ea2e1b597b0203e52db2fa7190f33a09abba40a11757dfdb54d05cd4a5a638" gracePeriod=30 Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.603348 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.614025 4929 scope.go:117] "RemoveContainer" containerID="041248002a0f79e917887cea403ae43aa0320cf65136be65cc42d7964baa02ef" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.620981 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.628823 4929 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/cinder-scheduler-0"] Nov 22 07:48:58 crc kubenswrapper[4929]: E1122 07:48:58.629193 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b60b952-faed-4624-a268-ec9a01c4271a" containerName="dnsmasq-dns" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.629232 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b60b952-faed-4624-a268-ec9a01c4271a" containerName="dnsmasq-dns" Nov 22 07:48:58 crc kubenswrapper[4929]: E1122 07:48:58.629265 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40fd17d3-85aa-4fa4-b933-7c10e2ca41b3" containerName="cinder-scheduler" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.629272 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="40fd17d3-85aa-4fa4-b933-7c10e2ca41b3" containerName="cinder-scheduler" Nov 22 07:48:58 crc kubenswrapper[4929]: E1122 07:48:58.629295 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40fd17d3-85aa-4fa4-b933-7c10e2ca41b3" containerName="probe" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.629301 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="40fd17d3-85aa-4fa4-b933-7c10e2ca41b3" containerName="probe" Nov 22 07:48:58 crc kubenswrapper[4929]: E1122 07:48:58.629315 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b60b952-faed-4624-a268-ec9a01c4271a" containerName="init" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.629320 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b60b952-faed-4624-a268-ec9a01c4271a" containerName="init" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.629490 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="40fd17d3-85aa-4fa4-b933-7c10e2ca41b3" containerName="probe" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.629513 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="40fd17d3-85aa-4fa4-b933-7c10e2ca41b3" containerName="cinder-scheduler" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.629524 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b60b952-faed-4624-a268-ec9a01c4271a" containerName="dnsmasq-dns" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.630624 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.632247 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.634314 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.644722 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.734155 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/143bdc31-dc3f-4c3f-81e2-9e4314ba960d-config-data\") pod \"cinder-scheduler-0\" (UID: \"143bdc31-dc3f-4c3f-81e2-9e4314ba960d\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.734523 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/143bdc31-dc3f-4c3f-81e2-9e4314ba960d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"143bdc31-dc3f-4c3f-81e2-9e4314ba960d\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.734670 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/143bdc31-dc3f-4c3f-81e2-9e4314ba960d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"143bdc31-dc3f-4c3f-81e2-9e4314ba960d\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.734832 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/143bdc31-dc3f-4c3f-81e2-9e4314ba960d-scripts\") pod \"cinder-scheduler-0\" (UID: \"143bdc31-dc3f-4c3f-81e2-9e4314ba960d\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.734981 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/143bdc31-dc3f-4c3f-81e2-9e4314ba960d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"143bdc31-dc3f-4c3f-81e2-9e4314ba960d\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.735082 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7884\" (UniqueName: \"kubernetes.io/projected/143bdc31-dc3f-4c3f-81e2-9e4314ba960d-kube-api-access-l7884\") pod \"cinder-scheduler-0\" (UID: \"143bdc31-dc3f-4c3f-81e2-9e4314ba960d\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.836654 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/143bdc31-dc3f-4c3f-81e2-9e4314ba960d-config-data\") pod \"cinder-scheduler-0\" (UID: \"143bdc31-dc3f-4c3f-81e2-9e4314ba960d\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.836715 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/143bdc31-dc3f-4c3f-81e2-9e4314ba960d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"143bdc31-dc3f-4c3f-81e2-9e4314ba960d\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.836780 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/143bdc31-dc3f-4c3f-81e2-9e4314ba960d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"143bdc31-dc3f-4c3f-81e2-9e4314ba960d\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.836822 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/143bdc31-dc3f-4c3f-81e2-9e4314ba960d-scripts\") pod \"cinder-scheduler-0\" (UID: \"143bdc31-dc3f-4c3f-81e2-9e4314ba960d\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.836862 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/143bdc31-dc3f-4c3f-81e2-9e4314ba960d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"143bdc31-dc3f-4c3f-81e2-9e4314ba960d\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.836881 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7884\" (UniqueName: \"kubernetes.io/projected/143bdc31-dc3f-4c3f-81e2-9e4314ba960d-kube-api-access-l7884\") pod \"cinder-scheduler-0\" (UID: \"143bdc31-dc3f-4c3f-81e2-9e4314ba960d\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.837445 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/143bdc31-dc3f-4c3f-81e2-9e4314ba960d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"143bdc31-dc3f-4c3f-81e2-9e4314ba960d\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.840744 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/143bdc31-dc3f-4c3f-81e2-9e4314ba960d-scripts\") pod \"cinder-scheduler-0\" (UID: \"143bdc31-dc3f-4c3f-81e2-9e4314ba960d\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.841044 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/143bdc31-dc3f-4c3f-81e2-9e4314ba960d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"143bdc31-dc3f-4c3f-81e2-9e4314ba960d\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.841401 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/143bdc31-dc3f-4c3f-81e2-9e4314ba960d-config-data\") pod \"cinder-scheduler-0\" (UID: \"143bdc31-dc3f-4c3f-81e2-9e4314ba960d\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.841679 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/143bdc31-dc3f-4c3f-81e2-9e4314ba960d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"143bdc31-dc3f-4c3f-81e2-9e4314ba960d\") " pod="openstack/cinder-scheduler-0" Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.856526 4929 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7884\" (UniqueName: \"kubernetes.io/projected/143bdc31-dc3f-4c3f-81e2-9e4314ba960d-kube-api-access-l7884\") pod \"cinder-scheduler-0\" (UID: \"143bdc31-dc3f-4c3f-81e2-9e4314ba960d\") " pod="openstack/cinder-scheduler-0"
Nov 22 07:48:58 crc kubenswrapper[4929]: I1122 07:48:58.971229 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.237440 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-68cf69f54-cvmvj" podUID="494b6c5c-2251-443b-bd47-fa903a139c47" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.191:9311/healthcheck\": read tcp 10.217.0.2:55808->10.217.0.191:9311: read: connection reset by peer"
Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.237443 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-68cf69f54-cvmvj" podUID="494b6c5c-2251-443b-bd47-fa903a139c47" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.191:9311/healthcheck\": read tcp 10.217.0.2:55810->10.217.0.191:9311: read: connection reset by peer"
Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.238172 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-68cf69f54-cvmvj" podUID="494b6c5c-2251-443b-bd47-fa903a139c47" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.191:9311/healthcheck\": dial tcp 10.217.0.191:9311: connect: connection refused"
Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.238412 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-68cf69f54-cvmvj"
Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.445947 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 22 07:48:59 crc kubenswrapper[4929]: W1122 07:48:59.472449 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod143bdc31_dc3f_4c3f_81e2_9e4314ba960d.slice/crio-7a504cd4e2a8745b5011bd8b876a2b444b6bbc452b0c853a8cb4881c908fead0 WatchSource:0}: Error finding container 7a504cd4e2a8745b5011bd8b876a2b444b6bbc452b0c853a8cb4881c908fead0: Status 404 returned error can't find the container with id 7a504cd4e2a8745b5011bd8b876a2b444b6bbc452b0c853a8cb4881c908fead0
Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.572892 4929 generic.go:334] "Generic (PLEG): container finished" podID="494b6c5c-2251-443b-bd47-fa903a139c47" containerID="2620af1a3ffa6ae9353c4a43eb0f1a0fbd7de998beb3bacffc68a0dbeb67c02a" exitCode=0
Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.572950 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68cf69f54-cvmvj" event={"ID":"494b6c5c-2251-443b-bd47-fa903a139c47","Type":"ContainerDied","Data":"2620af1a3ffa6ae9353c4a43eb0f1a0fbd7de998beb3bacffc68a0dbeb67c02a"}
Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.587262 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"143bdc31-dc3f-4c3f-81e2-9e4314ba960d","Type":"ContainerStarted","Data":"7a504cd4e2a8745b5011bd8b876a2b444b6bbc452b0c853a8cb4881c908fead0"}
Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.591795 4929 generic.go:334] "Generic (PLEG): container finished" podID="bc78f9a9-1829-44d9-b141-d9ddc9f63da7" containerID="e6ea2e1b597b0203e52db2fa7190f33a09abba40a11757dfdb54d05cd4a5a638" exitCode=0
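
The barbican-api probe failures above chart the shutdown from the prober's perspective: the earlier attempts (07:48:54) died with EOF while the server was draining, then an in-flight request fails with "connection reset by peer", then a fresh dial gets "connection refused" once nothing is listening on 10.217.0.191:9311, and the kubelet clears the readiness status (the "SyncLoop (probe)" entry with status=""). A bare-bones HTTP readiness check in the same spirit; probeOnce is illustrative, the URL comes from the log, and the 1 s timeout is assumed from the kubelet's default timeoutSeconds:

```go
// readiness_probe.go — illustrative HTTP readiness check, not the kubelet prober.
package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeOnce performs one GET and applies the kubelet's documented success
// rule for HTTP probes: any status in [200, 400) counts as ready.
func probeOnce(url string) error {
	client := &http.Client{Timeout: 1 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return err // EOF / connection reset / connection refused, as seen above
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		return nil
	}
	return fmt.Errorf("unhealthy status %d", resp.StatusCode)
}

func main() {
	if err := probeOnce("http://10.217.0.191:9311/healthcheck"); err != nil {
		fmt.Println("probe failed:", err)
	}
}
```
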
podID="bc78f9a9-1829-44d9-b141-d9ddc9f63da7" containerID="e6ea2e1b597b0203e52db2fa7190f33a09abba40a11757dfdb54d05cd4a5a638" exitCode=0 Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.591829 4929 generic.go:334] "Generic (PLEG): container finished" podID="bc78f9a9-1829-44d9-b141-d9ddc9f63da7" containerID="a8e4db0dcc75a4fd567e3ee46593f2b974b3894cbdeaf87a81a87551b0631c2c" exitCode=143 Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.591852 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bc78f9a9-1829-44d9-b141-d9ddc9f63da7","Type":"ContainerDied","Data":"e6ea2e1b597b0203e52db2fa7190f33a09abba40a11757dfdb54d05cd4a5a638"} Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.591881 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bc78f9a9-1829-44d9-b141-d9ddc9f63da7","Type":"ContainerDied","Data":"a8e4db0dcc75a4fd567e3ee46593f2b974b3894cbdeaf87a81a87551b0631c2c"} Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.665324 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.758153 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-config-data\") pod \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.758262 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.758299 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-scripts\") pod \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.758376 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-combined-ca-bundle\") pod \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.758441 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-public-tls-certs\") pod \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.758536 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lpkvz\" (UniqueName: \"kubernetes.io/projected/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-kube-api-access-lpkvz\") pod \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.758574 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-logs\") pod 
\"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.758619 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-httpd-run\") pod \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\" (UID: \"bc78f9a9-1829-44d9-b141-d9ddc9f63da7\") " Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.759813 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-logs" (OuterVolumeSpecName: "logs") pod "bc78f9a9-1829-44d9-b141-d9ddc9f63da7" (UID: "bc78f9a9-1829-44d9-b141-d9ddc9f63da7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.759897 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "bc78f9a9-1829-44d9-b141-d9ddc9f63da7" (UID: "bc78f9a9-1829-44d9-b141-d9ddc9f63da7"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.769340 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-scripts" (OuterVolumeSpecName: "scripts") pod "bc78f9a9-1829-44d9-b141-d9ddc9f63da7" (UID: "bc78f9a9-1829-44d9-b141-d9ddc9f63da7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.769570 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "bc78f9a9-1829-44d9-b141-d9ddc9f63da7" (UID: "bc78f9a9-1829-44d9-b141-d9ddc9f63da7"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.769644 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-kube-api-access-lpkvz" (OuterVolumeSpecName: "kube-api-access-lpkvz") pod "bc78f9a9-1829-44d9-b141-d9ddc9f63da7" (UID: "bc78f9a9-1829-44d9-b141-d9ddc9f63da7"). InnerVolumeSpecName "kube-api-access-lpkvz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.800867 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bc78f9a9-1829-44d9-b141-d9ddc9f63da7" (UID: "bc78f9a9-1829-44d9-b141-d9ddc9f63da7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.830591 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "bc78f9a9-1829-44d9-b141-d9ddc9f63da7" (UID: "bc78f9a9-1829-44d9-b141-d9ddc9f63da7"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.848717 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-config-data" (OuterVolumeSpecName: "config-data") pod "bc78f9a9-1829-44d9-b141-d9ddc9f63da7" (UID: "bc78f9a9-1829-44d9-b141-d9ddc9f63da7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.861298 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.861365 4929 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.861380 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.861393 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.861407 4929 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.861418 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lpkvz\" (UniqueName: \"kubernetes.io/projected/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-kube-api-access-lpkvz\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.861429 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.861439 4929 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bc78f9a9-1829-44d9-b141-d9ddc9f63da7-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.865320 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-68cf69f54-cvmvj" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.893478 4929 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.962980 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/494b6c5c-2251-443b-bd47-fa903a139c47-config-data\") pod \"494b6c5c-2251-443b-bd47-fa903a139c47\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.963119 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/494b6c5c-2251-443b-bd47-fa903a139c47-logs\") pod \"494b6c5c-2251-443b-bd47-fa903a139c47\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.963167 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/494b6c5c-2251-443b-bd47-fa903a139c47-combined-ca-bundle\") pod \"494b6c5c-2251-443b-bd47-fa903a139c47\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.963230 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/494b6c5c-2251-443b-bd47-fa903a139c47-config-data-custom\") pod \"494b6c5c-2251-443b-bd47-fa903a139c47\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.963395 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-klkdf\" (UniqueName: \"kubernetes.io/projected/494b6c5c-2251-443b-bd47-fa903a139c47-kube-api-access-klkdf\") pod \"494b6c5c-2251-443b-bd47-fa903a139c47\" (UID: \"494b6c5c-2251-443b-bd47-fa903a139c47\") " Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.963919 4929 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.963904 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/494b6c5c-2251-443b-bd47-fa903a139c47-logs" (OuterVolumeSpecName: "logs") pod "494b6c5c-2251-443b-bd47-fa903a139c47" (UID: "494b6c5c-2251-443b-bd47-fa903a139c47"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.968978 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40fd17d3-85aa-4fa4-b933-7c10e2ca41b3" path="/var/lib/kubelet/pods/40fd17d3-85aa-4fa4-b933-7c10e2ca41b3/volumes" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.970046 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/494b6c5c-2251-443b-bd47-fa903a139c47-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "494b6c5c-2251-443b-bd47-fa903a139c47" (UID: "494b6c5c-2251-443b-bd47-fa903a139c47"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:48:59 crc kubenswrapper[4929]: I1122 07:48:59.970323 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/494b6c5c-2251-443b-bd47-fa903a139c47-kube-api-access-klkdf" (OuterVolumeSpecName: "kube-api-access-klkdf") pod "494b6c5c-2251-443b-bd47-fa903a139c47" (UID: "494b6c5c-2251-443b-bd47-fa903a139c47"). InnerVolumeSpecName "kube-api-access-klkdf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.041241 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/494b6c5c-2251-443b-bd47-fa903a139c47-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "494b6c5c-2251-443b-bd47-fa903a139c47" (UID: "494b6c5c-2251-443b-bd47-fa903a139c47"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.048305 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/494b6c5c-2251-443b-bd47-fa903a139c47-config-data" (OuterVolumeSpecName: "config-data") pod "494b6c5c-2251-443b-bd47-fa903a139c47" (UID: "494b6c5c-2251-443b-bd47-fa903a139c47"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.065559 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-klkdf\" (UniqueName: \"kubernetes.io/projected/494b6c5c-2251-443b-bd47-fa903a139c47-kube-api-access-klkdf\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.065592 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/494b6c5c-2251-443b-bd47-fa903a139c47-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.065604 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/494b6c5c-2251-443b-bd47-fa903a139c47-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.065617 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/494b6c5c-2251-443b-bd47-fa903a139c47-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.065629 4929 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/494b6c5c-2251-443b-bd47-fa903a139c47-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.101093 4929 util.go:48] "No ready sandbox for pod can be found. 
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.101093 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.166752 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgr9l\" (UniqueName: \"kubernetes.io/projected/c4e20beb-f178-437e-a8db-2138531b8aab-kube-api-access-rgr9l\") pod \"c4e20beb-f178-437e-a8db-2138531b8aab\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") "
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.167165 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-scripts\") pod \"c4e20beb-f178-437e-a8db-2138531b8aab\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") "
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.167320 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-combined-ca-bundle\") pod \"c4e20beb-f178-437e-a8db-2138531b8aab\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") "
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.167362 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4e20beb-f178-437e-a8db-2138531b8aab-log-httpd\") pod \"c4e20beb-f178-437e-a8db-2138531b8aab\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") "
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.167417 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-ceilometer-tls-certs\") pod \"c4e20beb-f178-437e-a8db-2138531b8aab\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") "
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.167477 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-sg-core-conf-yaml\") pod \"c4e20beb-f178-437e-a8db-2138531b8aab\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") "
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.167529 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-config-data\") pod \"c4e20beb-f178-437e-a8db-2138531b8aab\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") "
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.167563 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4e20beb-f178-437e-a8db-2138531b8aab-run-httpd\") pod \"c4e20beb-f178-437e-a8db-2138531b8aab\" (UID: \"c4e20beb-f178-437e-a8db-2138531b8aab\") "
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.167776 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c4e20beb-f178-437e-a8db-2138531b8aab-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c4e20beb-f178-437e-a8db-2138531b8aab" (UID: "c4e20beb-f178-437e-a8db-2138531b8aab"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.168164 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c4e20beb-f178-437e-a8db-2138531b8aab-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c4e20beb-f178-437e-a8db-2138531b8aab" (UID: "c4e20beb-f178-437e-a8db-2138531b8aab"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.168172 4929 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4e20beb-f178-437e-a8db-2138531b8aab-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.170126 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4e20beb-f178-437e-a8db-2138531b8aab-kube-api-access-rgr9l" (OuterVolumeSpecName: "kube-api-access-rgr9l") pod "c4e20beb-f178-437e-a8db-2138531b8aab" (UID: "c4e20beb-f178-437e-a8db-2138531b8aab"). InnerVolumeSpecName "kube-api-access-rgr9l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.171305 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-scripts" (OuterVolumeSpecName: "scripts") pod "c4e20beb-f178-437e-a8db-2138531b8aab" (UID: "c4e20beb-f178-437e-a8db-2138531b8aab"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.196198 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c4e20beb-f178-437e-a8db-2138531b8aab" (UID: "c4e20beb-f178-437e-a8db-2138531b8aab"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.214426 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "c4e20beb-f178-437e-a8db-2138531b8aab" (UID: "c4e20beb-f178-437e-a8db-2138531b8aab"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.248428 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c4e20beb-f178-437e-a8db-2138531b8aab" (UID: "c4e20beb-f178-437e-a8db-2138531b8aab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.273332 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rgr9l\" (UniqueName: \"kubernetes.io/projected/c4e20beb-f178-437e-a8db-2138531b8aab-kube-api-access-rgr9l\") on node \"crc\" DevicePath \"\""
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.273370 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.273386 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.273402 4929 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.273414 4929 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.273426 4929 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c4e20beb-f178-437e-a8db-2138531b8aab-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.295251 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-config-data" (OuterVolumeSpecName: "config-data") pod "c4e20beb-f178-437e-a8db-2138531b8aab" (UID: "c4e20beb-f178-437e-a8db-2138531b8aab"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.379847 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4e20beb-f178-437e-a8db-2138531b8aab-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.611497 4929 generic.go:334] "Generic (PLEG): container finished" podID="c4e20beb-f178-437e-a8db-2138531b8aab" containerID="79b8401c0bd5f3d539c26d03616cb8e0b806ddef9b434740abfec2b172910800" exitCode=0
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.611613 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.611602 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c4e20beb-f178-437e-a8db-2138531b8aab","Type":"ContainerDied","Data":"79b8401c0bd5f3d539c26d03616cb8e0b806ddef9b434740abfec2b172910800"}
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.612091 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c4e20beb-f178-437e-a8db-2138531b8aab","Type":"ContainerDied","Data":"d9498f0408539ef76fb8ca6f1466f4ca5e445655530fb7a02e0baa02c3b49665"}
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.612165 4929 scope.go:117] "RemoveContainer" containerID="03e4f7ef8d14d79d92a0779577e613d262ca3b2877ca45233a0e6ddc527f6398"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.616527 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-68cf69f54-cvmvj"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.616552 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68cf69f54-cvmvj" event={"ID":"494b6c5c-2251-443b-bd47-fa903a139c47","Type":"ContainerDied","Data":"6a5deacd589a08f25ac012105a2e612edb3d0d829951db3182a732079ee6aa3c"}
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.627937 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"143bdc31-dc3f-4c3f-81e2-9e4314ba960d","Type":"ContainerStarted","Data":"764596f54be3597db301e8a875acbf935d9e3514866fce53dec06eaefe3900fb"}
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.632172 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bc78f9a9-1829-44d9-b141-d9ddc9f63da7","Type":"ContainerDied","Data":"515918d68c5b4cdcdf6d2e31f09d1afb3b97ee7db0b01b071de852220a51d302"}
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.632302 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.651740 4929 scope.go:117] "RemoveContainer" containerID="8b16fefa582fe2cecb48c3f39af4100cc6e39b10d7b91acffc272e1552e51282"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.690278 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.718268 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.737154 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-68cf69f54-cvmvj"]
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.769160 4929 scope.go:117] "RemoveContainer" containerID="0da3600328d3a040b87cb189c16aaf8379a7bc8d0ee3e69106490d915c8f6f42"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.769349 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:49:00 crc kubenswrapper[4929]: E1122 07:49:00.769817 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4e20beb-f178-437e-a8db-2138531b8aab" containerName="ceilometer-central-agent"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.769834 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4e20beb-f178-437e-a8db-2138531b8aab" containerName="ceilometer-central-agent"
Nov 22 07:49:00 crc kubenswrapper[4929]: E1122 07:49:00.769851 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc78f9a9-1829-44d9-b141-d9ddc9f63da7" containerName="glance-log"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.769859 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc78f9a9-1829-44d9-b141-d9ddc9f63da7" containerName="glance-log"
Nov 22 07:49:00 crc kubenswrapper[4929]: E1122 07:49:00.769884 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc78f9a9-1829-44d9-b141-d9ddc9f63da7" containerName="glance-httpd"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.769891 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc78f9a9-1829-44d9-b141-d9ddc9f63da7" containerName="glance-httpd"
Nov 22 07:49:00 crc kubenswrapper[4929]: E1122 07:49:00.769904 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4e20beb-f178-437e-a8db-2138531b8aab" containerName="sg-core"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.769911 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4e20beb-f178-437e-a8db-2138531b8aab" containerName="sg-core"
Nov 22 07:49:00 crc kubenswrapper[4929]: E1122 07:49:00.769927 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="494b6c5c-2251-443b-bd47-fa903a139c47" containerName="barbican-api"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.769934 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="494b6c5c-2251-443b-bd47-fa903a139c47" containerName="barbican-api"
Nov 22 07:49:00 crc kubenswrapper[4929]: E1122 07:49:00.769956 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4e20beb-f178-437e-a8db-2138531b8aab" containerName="ceilometer-notification-agent"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.769963 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4e20beb-f178-437e-a8db-2138531b8aab" containerName="ceilometer-notification-agent"
Nov 22 07:49:00 crc kubenswrapper[4929]: E1122 07:49:00.769978 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="494b6c5c-2251-443b-bd47-fa903a139c47" containerName="barbican-api-log"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.769985 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="494b6c5c-2251-443b-bd47-fa903a139c47" containerName="barbican-api-log"
Nov 22 07:49:00 crc kubenswrapper[4929]: E1122 07:49:00.770003 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4e20beb-f178-437e-a8db-2138531b8aab" containerName="proxy-httpd"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.770024 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4e20beb-f178-437e-a8db-2138531b8aab" containerName="proxy-httpd"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.770250 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4e20beb-f178-437e-a8db-2138531b8aab" containerName="ceilometer-central-agent"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.770269 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc78f9a9-1829-44d9-b141-d9ddc9f63da7" containerName="glance-httpd"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.770277 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4e20beb-f178-437e-a8db-2138531b8aab" containerName="sg-core"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.770293 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4e20beb-f178-437e-a8db-2138531b8aab" containerName="proxy-httpd"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.770310 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc78f9a9-1829-44d9-b141-d9ddc9f63da7" containerName="glance-log"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.770330 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="494b6c5c-2251-443b-bd47-fa903a139c47" containerName="barbican-api"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.770341 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="494b6c5c-2251-443b-bd47-fa903a139c47" containerName="barbican-api-log"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.770357 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4e20beb-f178-437e-a8db-2138531b8aab" containerName="ceilometer-notification-agent"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.777600 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-68cf69f54-cvmvj"]
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.778269 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.792788 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.793042 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.803338 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.804156 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.804484 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.804520 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.825281 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.858659 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.879511 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.881401 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.889509 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.892285 4929 scope.go:117] "RemoveContainer" containerID="79b8401c0bd5f3d539c26d03616cb8e0b806ddef9b434740abfec2b172910800"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.892748 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.898505 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-config-data\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.898553 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-run-httpd\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.898597 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.898676 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-scripts\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.898704 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qc82\" (UniqueName: \"kubernetes.io/projected/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-kube-api-access-5qc82\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.898727 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.898885 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-log-httpd\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.898917 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.928267 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.933488 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.980521 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.985225 4929 scope.go:117] "RemoveContainer" containerID="03e4f7ef8d14d79d92a0779577e613d262ca3b2877ca45233a0e6ddc527f6398"
Nov 22 07:49:00 crc kubenswrapper[4929]: E1122 07:49:00.989401 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03e4f7ef8d14d79d92a0779577e613d262ca3b2877ca45233a0e6ddc527f6398\": container with ID starting with 03e4f7ef8d14d79d92a0779577e613d262ca3b2877ca45233a0e6ddc527f6398 not found: ID does not exist" containerID="03e4f7ef8d14d79d92a0779577e613d262ca3b2877ca45233a0e6ddc527f6398"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.989449 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03e4f7ef8d14d79d92a0779577e613d262ca3b2877ca45233a0e6ddc527f6398"} err="failed to get container status \"03e4f7ef8d14d79d92a0779577e613d262ca3b2877ca45233a0e6ddc527f6398\": rpc error: code = NotFound desc = could not find container \"03e4f7ef8d14d79d92a0779577e613d262ca3b2877ca45233a0e6ddc527f6398\": container with ID starting with 03e4f7ef8d14d79d92a0779577e613d262ca3b2877ca45233a0e6ddc527f6398 not found: ID does not exist"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.989479 4929 scope.go:117] "RemoveContainer" containerID="8b16fefa582fe2cecb48c3f39af4100cc6e39b10d7b91acffc272e1552e51282"
Nov 22 07:49:00 crc kubenswrapper[4929]: E1122 07:49:00.995984 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b16fefa582fe2cecb48c3f39af4100cc6e39b10d7b91acffc272e1552e51282\": container with ID starting with 8b16fefa582fe2cecb48c3f39af4100cc6e39b10d7b91acffc272e1552e51282 not found: ID does not exist" containerID="8b16fefa582fe2cecb48c3f39af4100cc6e39b10d7b91acffc272e1552e51282"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.996036 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b16fefa582fe2cecb48c3f39af4100cc6e39b10d7b91acffc272e1552e51282"} err="failed to get container status \"8b16fefa582fe2cecb48c3f39af4100cc6e39b10d7b91acffc272e1552e51282\": rpc error: code = NotFound desc = could not find container \"8b16fefa582fe2cecb48c3f39af4100cc6e39b10d7b91acffc272e1552e51282\": container with ID starting with 8b16fefa582fe2cecb48c3f39af4100cc6e39b10d7b91acffc272e1552e51282 not found: ID does not exist"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.996065 4929 scope.go:117] "RemoveContainer" containerID="0da3600328d3a040b87cb189c16aaf8379a7bc8d0ee3e69106490d915c8f6f42"
Nov 22 07:49:00 crc kubenswrapper[4929]: E1122 07:49:00.998124 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0da3600328d3a040b87cb189c16aaf8379a7bc8d0ee3e69106490d915c8f6f42\": container with ID starting with 0da3600328d3a040b87cb189c16aaf8379a7bc8d0ee3e69106490d915c8f6f42 not found: ID does not exist" containerID="0da3600328d3a040b87cb189c16aaf8379a7bc8d0ee3e69106490d915c8f6f42"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.998163 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0da3600328d3a040b87cb189c16aaf8379a7bc8d0ee3e69106490d915c8f6f42"} err="failed to get container status \"0da3600328d3a040b87cb189c16aaf8379a7bc8d0ee3e69106490d915c8f6f42\": rpc error: code = NotFound desc = could not find container \"0da3600328d3a040b87cb189c16aaf8379a7bc8d0ee3e69106490d915c8f6f42\": container with ID starting with 0da3600328d3a040b87cb189c16aaf8379a7bc8d0ee3e69106490d915c8f6f42 not found: ID does not exist"
Nov 22 07:49:00 crc kubenswrapper[4929]: I1122 07:49:00.998191 4929 scope.go:117] "RemoveContainer" containerID="79b8401c0bd5f3d539c26d03616cb8e0b806ddef9b434740abfec2b172910800"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.003342 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-scripts\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.003411 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qc82\" (UniqueName: \"kubernetes.io/projected/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-kube-api-access-5qc82\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.003446 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.003525 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bae86c1f-5d59-4797-9d7e-f60207880160-logs\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.003563 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bae86c1f-5d59-4797-9d7e-f60207880160-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.003596 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.003629 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5ncs\" (UniqueName: \"kubernetes.io/projected/bae86c1f-5d59-4797-9d7e-f60207880160-kube-api-access-s5ncs\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.003691 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bae86c1f-5d59-4797-9d7e-f60207880160-scripts\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.003718 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-log-httpd\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.003755 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.003777 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-config-data\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.003801 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-run-httpd\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.003841 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.003876 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bae86c1f-5d59-4797-9d7e-f60207880160-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.003907 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bae86c1f-5d59-4797-9d7e-f60207880160-config-data\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.003974 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bae86c1f-5d59-4797-9d7e-f60207880160-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.010108 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-log-httpd\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.010119 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-run-httpd\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: E1122 07:49:01.010317 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79b8401c0bd5f3d539c26d03616cb8e0b806ddef9b434740abfec2b172910800\": container with ID starting with 79b8401c0bd5f3d539c26d03616cb8e0b806ddef9b434740abfec2b172910800 not found: ID does not exist" containerID="79b8401c0bd5f3d539c26d03616cb8e0b806ddef9b434740abfec2b172910800"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.010352 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79b8401c0bd5f3d539c26d03616cb8e0b806ddef9b434740abfec2b172910800"} err="failed to get container status \"79b8401c0bd5f3d539c26d03616cb8e0b806ddef9b434740abfec2b172910800\": rpc error: code = NotFound desc = could not find container \"79b8401c0bd5f3d539c26d03616cb8e0b806ddef9b434740abfec2b172910800\": container with ID starting with 79b8401c0bd5f3d539c26d03616cb8e0b806ddef9b434740abfec2b172910800 not found: ID does not exist"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.010382 4929 scope.go:117] "RemoveContainer" containerID="2620af1a3ffa6ae9353c4a43eb0f1a0fbd7de998beb3bacffc68a0dbeb67c02a"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.016004 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-config-data\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.021877 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qc82\" (UniqueName: \"kubernetes.io/projected/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-kube-api-access-5qc82\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.022198 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.022332 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-scripts\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.024565 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.026806 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " pod="openstack/ceilometer-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.105721 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bae86c1f-5d59-4797-9d7e-f60207880160-logs\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.105775 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bae86c1f-5d59-4797-9d7e-f60207880160-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.105805 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.105829 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5ncs\" (UniqueName: \"kubernetes.io/projected/bae86c1f-5d59-4797-9d7e-f60207880160-kube-api-access-s5ncs\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.105863 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bae86c1f-5d59-4797-9d7e-f60207880160-scripts\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.105914 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bae86c1f-5d59-4797-9d7e-f60207880160-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.105931 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bae86c1f-5d59-4797-9d7e-f60207880160-config-data\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.105963 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bae86c1f-5d59-4797-9d7e-f60207880160-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.106198 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/bae86c1f-5d59-4797-9d7e-f60207880160-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.106286 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bae86c1f-5d59-4797-9d7e-f60207880160-logs\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.106373 4929 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.114002 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bae86c1f-5d59-4797-9d7e-f60207880160-scripts\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.129337 4929 scope.go:117] "RemoveContainer" containerID="4fee07e82750c13c657fa3ec87573da70fdde66c21767b4723fde145210d80f7"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.130138 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bae86c1f-5d59-4797-9d7e-f60207880160-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.130435 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bae86c1f-5d59-4797-9d7e-f60207880160-config-data\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.131786 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bae86c1f-5d59-4797-9d7e-f60207880160-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.135834 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5ncs\" (UniqueName: \"kubernetes.io/projected/bae86c1f-5d59-4797-9d7e-f60207880160-kube-api-access-s5ncs\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.137572 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.178530 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"bae86c1f-5d59-4797-9d7e-f60207880160\") " pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.186555 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.209293 4929 scope.go:117] "RemoveContainer" containerID="e6ea2e1b597b0203e52db2fa7190f33a09abba40a11757dfdb54d05cd4a5a638"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.254300 4929 scope.go:117] "RemoveContainer" containerID="a8e4db0dcc75a4fd567e3ee46593f2b974b3894cbdeaf87a81a87551b0631c2c"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.254802 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.654112 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"143bdc31-dc3f-4c3f-81e2-9e4314ba960d","Type":"ContainerStarted","Data":"6bb73f8dc9c61a664fe7b63d05c97cce0fd49b3510e1fefb12e8898841e3acf8"}
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.673442 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.674386 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.678685 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.67865077 podStartE2EDuration="3.67865077s" podCreationTimestamp="2025-11-22 07:48:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:49:01.672146249 +0000 UTC m=+2278.781600262" watchObservedRunningTime="2025-11-22 07:49:01.67865077 +0000 UTC m=+2278.788104783"
Nov 22 07:49:01 crc kubenswrapper[4929]: I1122 07:49:01.806264 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 07:49:01 crc kubenswrapper[4929]: W1122 07:49:01.829689 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod38e1b0fb_8aaf_48b9_9dc1_9c1c80d2bd79.slice/crio-825d73c23409b6a059da0989fa9bac5b7821f9ad616450cb3751bc7232bd6bd3 WatchSource:0}: Error finding container 825d73c23409b6a059da0989fa9bac5b7821f9ad616450cb3751bc7232bd6bd3: Status 404 returned error can't find the container with id 825d73c23409b6a059da0989fa9bac5b7821f9ad616450cb3751bc7232bd6bd3
Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:01.960807 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="494b6c5c-2251-443b-bd47-fa903a139c47" path="/var/lib/kubelet/pods/494b6c5c-2251-443b-bd47-fa903a139c47/volumes"
Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:01.961541 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc78f9a9-1829-44d9-b141-d9ddc9f63da7" path="/var/lib/kubelet/pods/bc78f9a9-1829-44d9-b141-d9ddc9f63da7/volumes"
Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:01.962366 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4e20beb-f178-437e-a8db-2138531b8aab" path="/var/lib/kubelet/pods/c4e20beb-f178-437e-a8db-2138531b8aab/volumes"
Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:01.983321 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:02.682334 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79","Type":"ContainerStarted","Data":"825d73c23409b6a059da0989fa9bac5b7821f9ad616450cb3751bc7232bd6bd3"}
Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:02.684895 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bae86c1f-5d59-4797-9d7e-f60207880160","Type":"ContainerStarted","Data":"54db57e597f398a75415876e502bd185a4937301ccbca8b74443146be47dfeb0"}
Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:02.685367 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="7e821d91-79d4-4a36-8f13-2d2f591a3f62" containerName="glance-log" containerID="cri-o://3c0a40c583532b24c72a8476490339f89ca8e2d8c95fee096bc9399561142705" gracePeriod=30
Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:02.686144 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="7e821d91-79d4-4a36-8f13-2d2f591a3f62" containerName="glance-httpd" containerID="cri-o://522ec04975e0cb6924ba8ce234fb987d75495d028d28f1ab472626dc3377bdb5" gracePeriod=30
Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:02.694548 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="7e821d91-79d4-4a36-8f13-2d2f591a3f62" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.201:9292/healthcheck\": EOF"
Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:02.694624 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="7e821d91-79d4-4a36-8f13-2d2f591a3f62" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.201:9292/healthcheck\": EOF"
Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:02.717761 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="7e821d91-79d4-4a36-8f13-2d2f591a3f62" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.201:9292/healthcheck\": read tcp 10.217.0.2:58258->10.217.0.201:9292: read: connection reset by peer"
Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:02.717878 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="7e821d91-79d4-4a36-8f13-2d2f591a3f62" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.201:9292/healthcheck\": read tcp 10.217.0.2:58274->10.217.0.201:9292: read: connection reset by peer"
Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:03.696164 4929 generic.go:334] "Generic (PLEG): container finished" podID="7e821d91-79d4-4a36-8f13-2d2f591a3f62" containerID="3c0a40c583532b24c72a8476490339f89ca8e2d8c95fee096bc9399561142705" exitCode=143
Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:03.696255 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7e821d91-79d4-4a36-8f13-2d2f591a3f62","Type":"ContainerDied","Data":"3c0a40c583532b24c72a8476490339f89ca8e2d8c95fee096bc9399561142705"}
Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:03.971706 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:04.707355 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bae86c1f-5d59-4797-9d7e-f60207880160","Type":"ContainerStarted","Data":"0ca76f0bb625cc90ba24f14cf33eeeae3249a261fa44367b42f6b185b22281ca"}
Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:04.799632 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-api-0" podUID="c0f7a9d8-548a-4490-9fd9-8c431e6ca06a" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.200:8776/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
07:49:09 crc kubenswrapper[4929]: I1122 07:49:04.842158 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:04.964823 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:06.744077 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"bae86c1f-5d59-4797-9d7e-f60207880160","Type":"ContainerStarted","Data":"fef40ee2b8d38e32a37e636a82fd355d781321d6859a8811ccec7d3f614cec23"} Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:06.779639 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.77961502 podStartE2EDuration="6.77961502s" podCreationTimestamp="2025-11-22 07:49:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:49:06.761544011 +0000 UTC m=+2283.870998034" watchObservedRunningTime="2025-11-22 07:49:06.77961502 +0000 UTC m=+2283.889069033" Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:09.381404 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 22 07:49:09 crc kubenswrapper[4929]: I1122 07:49:09.774280 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79","Type":"ContainerStarted","Data":"869fd83de686651927d4fb4989cae4e8bacfe371643555a8daee49f32a6a7d8d"} Nov 22 07:49:11 crc kubenswrapper[4929]: I1122 07:49:11.255564 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 22 07:49:11 crc kubenswrapper[4929]: I1122 07:49:11.255633 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 22 07:49:11 crc kubenswrapper[4929]: I1122 07:49:11.311625 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 22 07:49:11 crc kubenswrapper[4929]: I1122 07:49:11.311728 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 22 07:49:11 crc kubenswrapper[4929]: I1122 07:49:11.798469 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 22 07:49:11 crc kubenswrapper[4929]: I1122 07:49:11.799014 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 22 07:49:11 crc kubenswrapper[4929]: I1122 07:49:11.816763 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 22 07:49:11 crc kubenswrapper[4929]: I1122 07:49:11.817108 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-decision-engine-0" podUID="a6b33a22-ad43-4a85-bcb7-8345455b171c" containerName="watcher-decision-engine" containerID="cri-o://8becd2cfdef3a29b1447b0bafded6a8d87437880ebb31f22a87d12f27f0bea6f" gracePeriod=30 Nov 22 07:49:12 crc kubenswrapper[4929]: I1122 07:49:12.809870 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79","Type":"ContainerStarted","Data":"645d16c011105be1eebe243090745ca87e7d96ab6084b18fc4df9510a5c2c034"} Nov 22 07:49:13 crc kubenswrapper[4929]: I1122 07:49:13.767657 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 22 07:49:13 crc kubenswrapper[4929]: I1122 07:49:13.819015 4929 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 07:49:13 crc kubenswrapper[4929]: I1122 07:49:13.890267 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 22 07:49:14 crc kubenswrapper[4929]: I1122 07:49:14.830983 4929 generic.go:334] "Generic (PLEG): container finished" podID="7e821d91-79d4-4a36-8f13-2d2f591a3f62" containerID="522ec04975e0cb6924ba8ce234fb987d75495d028d28f1ab472626dc3377bdb5" exitCode=0 Nov 22 07:49:14 crc kubenswrapper[4929]: I1122 07:49:14.831146 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7e821d91-79d4-4a36-8f13-2d2f591a3f62","Type":"ContainerDied","Data":"522ec04975e0cb6924ba8ce234fb987d75495d028d28f1ab472626dc3377bdb5"} Nov 22 07:49:14 crc kubenswrapper[4929]: I1122 07:49:14.834336 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79","Type":"ContainerStarted","Data":"59449bed598be38adf4735efc0db23bd23d40103ff8a6a5e1bc51930f02f9588"} Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.304078 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.434866 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-scripts\") pod \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.435006 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nrjr2\" (UniqueName: \"kubernetes.io/projected/7e821d91-79d4-4a36-8f13-2d2f591a3f62-kube-api-access-nrjr2\") pod \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.435228 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-config-data\") pod \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.435264 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.435324 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-internal-tls-certs\") pod \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.435351 4929 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e821d91-79d4-4a36-8f13-2d2f591a3f62-logs\") pod \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.435406 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7e821d91-79d4-4a36-8f13-2d2f591a3f62-httpd-run\") pod \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.435446 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-combined-ca-bundle\") pod \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\" (UID: \"7e821d91-79d4-4a36-8f13-2d2f591a3f62\") " Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.436584 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e821d91-79d4-4a36-8f13-2d2f591a3f62-logs" (OuterVolumeSpecName: "logs") pod "7e821d91-79d4-4a36-8f13-2d2f591a3f62" (UID: "7e821d91-79d4-4a36-8f13-2d2f591a3f62"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.436703 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e821d91-79d4-4a36-8f13-2d2f591a3f62-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "7e821d91-79d4-4a36-8f13-2d2f591a3f62" (UID: "7e821d91-79d4-4a36-8f13-2d2f591a3f62"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.441446 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-scripts" (OuterVolumeSpecName: "scripts") pod "7e821d91-79d4-4a36-8f13-2d2f591a3f62" (UID: "7e821d91-79d4-4a36-8f13-2d2f591a3f62"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.444314 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "7e821d91-79d4-4a36-8f13-2d2f591a3f62" (UID: "7e821d91-79d4-4a36-8f13-2d2f591a3f62"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.453952 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e821d91-79d4-4a36-8f13-2d2f591a3f62-kube-api-access-nrjr2" (OuterVolumeSpecName: "kube-api-access-nrjr2") pod "7e821d91-79d4-4a36-8f13-2d2f591a3f62" (UID: "7e821d91-79d4-4a36-8f13-2d2f591a3f62"). InnerVolumeSpecName "kube-api-access-nrjr2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.472806 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7e821d91-79d4-4a36-8f13-2d2f591a3f62" (UID: "7e821d91-79d4-4a36-8f13-2d2f591a3f62"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.494968 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "7e821d91-79d4-4a36-8f13-2d2f591a3f62" (UID: "7e821d91-79d4-4a36-8f13-2d2f591a3f62"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.505261 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-config-data" (OuterVolumeSpecName: "config-data") pod "7e821d91-79d4-4a36-8f13-2d2f591a3f62" (UID: "7e821d91-79d4-4a36-8f13-2d2f591a3f62"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.537702 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.537776 4929 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.537795 4929 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.537816 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7e821d91-79d4-4a36-8f13-2d2f591a3f62-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.537832 4929 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7e821d91-79d4-4a36-8f13-2d2f591a3f62-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.537844 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.537855 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e821d91-79d4-4a36-8f13-2d2f591a3f62-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.537867 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nrjr2\" (UniqueName: \"kubernetes.io/projected/7e821d91-79d4-4a36-8f13-2d2f591a3f62-kube-api-access-nrjr2\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.560961 4929 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.639874 4929 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 
07:49:15.845059 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7e821d91-79d4-4a36-8f13-2d2f591a3f62","Type":"ContainerDied","Data":"a113c02fd5e128fdbc9da6dc8973ba7d1a6ecab47b49fab4649e06322d0fbdb6"} Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.845140 4929 scope.go:117] "RemoveContainer" containerID="522ec04975e0cb6924ba8ce234fb987d75495d028d28f1ab472626dc3377bdb5" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.845077 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.877930 4929 scope.go:117] "RemoveContainer" containerID="3c0a40c583532b24c72a8476490339f89ca8e2d8c95fee096bc9399561142705" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.882011 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.890017 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.914120 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 07:49:15 crc kubenswrapper[4929]: E1122 07:49:15.914592 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e821d91-79d4-4a36-8f13-2d2f591a3f62" containerName="glance-log" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.914616 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e821d91-79d4-4a36-8f13-2d2f591a3f62" containerName="glance-log" Nov 22 07:49:15 crc kubenswrapper[4929]: E1122 07:49:15.914641 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e821d91-79d4-4a36-8f13-2d2f591a3f62" containerName="glance-httpd" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.914650 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e821d91-79d4-4a36-8f13-2d2f591a3f62" containerName="glance-httpd" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.914886 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e821d91-79d4-4a36-8f13-2d2f591a3f62" containerName="glance-log" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.914920 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e821d91-79d4-4a36-8f13-2d2f591a3f62" containerName="glance-httpd" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.916237 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.919729 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.922174 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.932115 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 07:49:15 crc kubenswrapper[4929]: I1122 07:49:15.962935 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e821d91-79d4-4a36-8f13-2d2f591a3f62" path="/var/lib/kubelet/pods/7e821d91-79d4-4a36-8f13-2d2f591a3f62/volumes" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.049929 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/695d5e6c-3514-4a49-bde8-53d4e84a7716-config-data\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.049973 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/695d5e6c-3514-4a49-bde8-53d4e84a7716-logs\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.050010 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/695d5e6c-3514-4a49-bde8-53d4e84a7716-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.050052 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qp5f\" (UniqueName: \"kubernetes.io/projected/695d5e6c-3514-4a49-bde8-53d4e84a7716-kube-api-access-8qp5f\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.050077 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.050112 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/695d5e6c-3514-4a49-bde8-53d4e84a7716-scripts\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.050199 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/695d5e6c-3514-4a49-bde8-53d4e84a7716-combined-ca-bundle\") pod 
\"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.050259 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/695d5e6c-3514-4a49-bde8-53d4e84a7716-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.152521 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/695d5e6c-3514-4a49-bde8-53d4e84a7716-config-data\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.152883 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/695d5e6c-3514-4a49-bde8-53d4e84a7716-logs\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.152922 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/695d5e6c-3514-4a49-bde8-53d4e84a7716-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.152978 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qp5f\" (UniqueName: \"kubernetes.io/projected/695d5e6c-3514-4a49-bde8-53d4e84a7716-kube-api-access-8qp5f\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.153006 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.153032 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/695d5e6c-3514-4a49-bde8-53d4e84a7716-scripts\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.153115 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/695d5e6c-3514-4a49-bde8-53d4e84a7716-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.153164 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/695d5e6c-3514-4a49-bde8-53d4e84a7716-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: 
\"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.153756 4929 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.154297 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/695d5e6c-3514-4a49-bde8-53d4e84a7716-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.154420 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/695d5e6c-3514-4a49-bde8-53d4e84a7716-logs\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.161480 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/695d5e6c-3514-4a49-bde8-53d4e84a7716-scripts\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.162876 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/695d5e6c-3514-4a49-bde8-53d4e84a7716-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.164067 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/695d5e6c-3514-4a49-bde8-53d4e84a7716-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.198124 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qp5f\" (UniqueName: \"kubernetes.io/projected/695d5e6c-3514-4a49-bde8-53d4e84a7716-kube-api-access-8qp5f\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.199398 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/695d5e6c-3514-4a49-bde8-53d4e84a7716-config-data\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 07:49:16.253686 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"695d5e6c-3514-4a49-bde8-53d4e84a7716\") " pod="openstack/glance-default-internal-api-0" Nov 22 07:49:16 crc kubenswrapper[4929]: I1122 
07:49:16.535781 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 07:49:17 crc kubenswrapper[4929]: W1122 07:49:17.079539 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod695d5e6c_3514_4a49_bde8_53d4e84a7716.slice/crio-727a840a1daf4fda5dfd179e5da4ab8310ad4652b652ba4dd539aa94a498163e WatchSource:0}: Error finding container 727a840a1daf4fda5dfd179e5da4ab8310ad4652b652ba4dd539aa94a498163e: Status 404 returned error can't find the container with id 727a840a1daf4fda5dfd179e5da4ab8310ad4652b652ba4dd539aa94a498163e Nov 22 07:49:17 crc kubenswrapper[4929]: I1122 07:49:17.082371 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 07:49:17 crc kubenswrapper[4929]: I1122 07:49:17.886027 4929 generic.go:334] "Generic (PLEG): container finished" podID="a6b33a22-ad43-4a85-bcb7-8345455b171c" containerID="8becd2cfdef3a29b1447b0bafded6a8d87437880ebb31f22a87d12f27f0bea6f" exitCode=0 Nov 22 07:49:17 crc kubenswrapper[4929]: I1122 07:49:17.886136 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"a6b33a22-ad43-4a85-bcb7-8345455b171c","Type":"ContainerDied","Data":"8becd2cfdef3a29b1447b0bafded6a8d87437880ebb31f22a87d12f27f0bea6f"} Nov 22 07:49:17 crc kubenswrapper[4929]: I1122 07:49:17.889667 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerName="ceilometer-central-agent" containerID="cri-o://869fd83de686651927d4fb4989cae4e8bacfe371643555a8daee49f32a6a7d8d" gracePeriod=30 Nov 22 07:49:17 crc kubenswrapper[4929]: I1122 07:49:17.889723 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerName="sg-core" containerID="cri-o://59449bed598be38adf4735efc0db23bd23d40103ff8a6a5e1bc51930f02f9588" gracePeriod=30 Nov 22 07:49:17 crc kubenswrapper[4929]: I1122 07:49:17.889792 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerName="proxy-httpd" containerID="cri-o://55cc4bdaf8ef8e422270c95dbe70b5a3ebb69fe5d9ee13a49d233ff73c70fd7f" gracePeriod=30 Nov 22 07:49:17 crc kubenswrapper[4929]: I1122 07:49:17.889814 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerName="ceilometer-notification-agent" containerID="cri-o://645d16c011105be1eebe243090745ca87e7d96ab6084b18fc4df9510a5c2c034" gracePeriod=30 Nov 22 07:49:17 crc kubenswrapper[4929]: I1122 07:49:17.889591 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79","Type":"ContainerStarted","Data":"55cc4bdaf8ef8e422270c95dbe70b5a3ebb69fe5d9ee13a49d233ff73c70fd7f"} Nov 22 07:49:17 crc kubenswrapper[4929]: I1122 07:49:17.893983 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"695d5e6c-3514-4a49-bde8-53d4e84a7716","Type":"ContainerStarted","Data":"3337e04aa4c0143b5abcfc04e3129f092f1ea0963f28d0982e2bf5e4f271c8c9"} Nov 22 07:49:17 crc kubenswrapper[4929]: I1122 07:49:17.894123 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-internal-api-0" event={"ID":"695d5e6c-3514-4a49-bde8-53d4e84a7716","Type":"ContainerStarted","Data":"727a840a1daf4fda5dfd179e5da4ab8310ad4652b652ba4dd539aa94a498163e"} Nov 22 07:49:17 crc kubenswrapper[4929]: I1122 07:49:17.894277 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.711811 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.728934 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=4.332408858 podStartE2EDuration="18.728914678s" podCreationTimestamp="2025-11-22 07:49:00 +0000 UTC" firstStartedPulling="2025-11-22 07:49:01.834483098 +0000 UTC m=+2278.943937111" lastFinishedPulling="2025-11-22 07:49:16.230988918 +0000 UTC m=+2293.340442931" observedRunningTime="2025-11-22 07:49:17.919056527 +0000 UTC m=+2295.028510540" watchObservedRunningTime="2025-11-22 07:49:18.728914678 +0000 UTC m=+2295.838368691" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.802550 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6b33a22-ad43-4a85-bcb7-8345455b171c-combined-ca-bundle\") pod \"a6b33a22-ad43-4a85-bcb7-8345455b171c\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.802654 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a6b33a22-ad43-4a85-bcb7-8345455b171c-custom-prometheus-ca\") pod \"a6b33a22-ad43-4a85-bcb7-8345455b171c\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.802685 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6b33a22-ad43-4a85-bcb7-8345455b171c-config-data\") pod \"a6b33a22-ad43-4a85-bcb7-8345455b171c\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.802752 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mx5qr\" (UniqueName: \"kubernetes.io/projected/a6b33a22-ad43-4a85-bcb7-8345455b171c-kube-api-access-mx5qr\") pod \"a6b33a22-ad43-4a85-bcb7-8345455b171c\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.802778 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a6b33a22-ad43-4a85-bcb7-8345455b171c-logs\") pod \"a6b33a22-ad43-4a85-bcb7-8345455b171c\" (UID: \"a6b33a22-ad43-4a85-bcb7-8345455b171c\") " Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.803253 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6b33a22-ad43-4a85-bcb7-8345455b171c-logs" (OuterVolumeSpecName: "logs") pod "a6b33a22-ad43-4a85-bcb7-8345455b171c" (UID: "a6b33a22-ad43-4a85-bcb7-8345455b171c"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.803667 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a6b33a22-ad43-4a85-bcb7-8345455b171c-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.808701 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6b33a22-ad43-4a85-bcb7-8345455b171c-kube-api-access-mx5qr" (OuterVolumeSpecName: "kube-api-access-mx5qr") pod "a6b33a22-ad43-4a85-bcb7-8345455b171c" (UID: "a6b33a22-ad43-4a85-bcb7-8345455b171c"). InnerVolumeSpecName "kube-api-access-mx5qr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.835564 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6b33a22-ad43-4a85-bcb7-8345455b171c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a6b33a22-ad43-4a85-bcb7-8345455b171c" (UID: "a6b33a22-ad43-4a85-bcb7-8345455b171c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.841379 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6b33a22-ad43-4a85-bcb7-8345455b171c-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "a6b33a22-ad43-4a85-bcb7-8345455b171c" (UID: "a6b33a22-ad43-4a85-bcb7-8345455b171c"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.861715 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6b33a22-ad43-4a85-bcb7-8345455b171c-config-data" (OuterVolumeSpecName: "config-data") pod "a6b33a22-ad43-4a85-bcb7-8345455b171c" (UID: "a6b33a22-ad43-4a85-bcb7-8345455b171c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.902676 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"695d5e6c-3514-4a49-bde8-53d4e84a7716","Type":"ContainerStarted","Data":"82dc289c1474d9f39eb84d6f06899fde509cc82c575140cc9c23f3a25a913088"} Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.906848 4929 generic.go:334] "Generic (PLEG): container finished" podID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerID="55cc4bdaf8ef8e422270c95dbe70b5a3ebb69fe5d9ee13a49d233ff73c70fd7f" exitCode=0 Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.906889 4929 generic.go:334] "Generic (PLEG): container finished" podID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerID="59449bed598be38adf4735efc0db23bd23d40103ff8a6a5e1bc51930f02f9588" exitCode=2 Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.906902 4929 generic.go:334] "Generic (PLEG): container finished" podID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerID="645d16c011105be1eebe243090745ca87e7d96ab6084b18fc4df9510a5c2c034" exitCode=0 Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.906919 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79","Type":"ContainerDied","Data":"55cc4bdaf8ef8e422270c95dbe70b5a3ebb69fe5d9ee13a49d233ff73c70fd7f"} Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.906967 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79","Type":"ContainerDied","Data":"59449bed598be38adf4735efc0db23bd23d40103ff8a6a5e1bc51930f02f9588"} Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.906979 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79","Type":"ContainerDied","Data":"645d16c011105be1eebe243090745ca87e7d96ab6084b18fc4df9510a5c2c034"} Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.908191 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6b33a22-ad43-4a85-bcb7-8345455b171c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.908235 4929 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a6b33a22-ad43-4a85-bcb7-8345455b171c-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.908246 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6b33a22-ad43-4a85-bcb7-8345455b171c-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.908255 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mx5qr\" (UniqueName: \"kubernetes.io/projected/a6b33a22-ad43-4a85-bcb7-8345455b171c-kube-api-access-mx5qr\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.908381 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"a6b33a22-ad43-4a85-bcb7-8345455b171c","Type":"ContainerDied","Data":"9c3a2c976bf5d48b9f7164f7bf47eafa444109f9585fbf0ed75629d492e58c88"} Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.908420 4929 scope.go:117] "RemoveContainer" 
containerID="8becd2cfdef3a29b1447b0bafded6a8d87437880ebb31f22a87d12f27f0bea6f" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.908476 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.947462 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.947442902 podStartE2EDuration="3.947442902s" podCreationTimestamp="2025-11-22 07:49:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:49:18.931770593 +0000 UTC m=+2296.041224616" watchObservedRunningTime="2025-11-22 07:49:18.947442902 +0000 UTC m=+2296.056896915" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.951683 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.962483 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.973872 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 22 07:49:18 crc kubenswrapper[4929]: E1122 07:49:18.974586 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6b33a22-ad43-4a85-bcb7-8345455b171c" containerName="watcher-decision-engine" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.974675 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6b33a22-ad43-4a85-bcb7-8345455b171c" containerName="watcher-decision-engine" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.974954 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6b33a22-ad43-4a85-bcb7-8345455b171c" containerName="watcher-decision-engine" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.975699 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.979245 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Nov 22 07:49:18 crc kubenswrapper[4929]: I1122 07:49:18.998786 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 22 07:49:19 crc kubenswrapper[4929]: I1122 07:49:19.112703 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3dae3e9-bb7e-42e8-801e-b313a6385954-logs\") pod \"watcher-decision-engine-0\" (UID: \"f3dae3e9-bb7e-42e8-801e-b313a6385954\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:49:19 crc kubenswrapper[4929]: I1122 07:49:19.113096 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3dae3e9-bb7e-42e8-801e-b313a6385954-config-data\") pod \"watcher-decision-engine-0\" (UID: \"f3dae3e9-bb7e-42e8-801e-b313a6385954\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:49:19 crc kubenswrapper[4929]: I1122 07:49:19.113288 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/f3dae3e9-bb7e-42e8-801e-b313a6385954-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"f3dae3e9-bb7e-42e8-801e-b313a6385954\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:49:19 crc kubenswrapper[4929]: I1122 07:49:19.113411 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhlwp\" (UniqueName: \"kubernetes.io/projected/f3dae3e9-bb7e-42e8-801e-b313a6385954-kube-api-access-bhlwp\") pod \"watcher-decision-engine-0\" (UID: \"f3dae3e9-bb7e-42e8-801e-b313a6385954\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:49:19 crc kubenswrapper[4929]: I1122 07:49:19.113543 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3dae3e9-bb7e-42e8-801e-b313a6385954-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"f3dae3e9-bb7e-42e8-801e-b313a6385954\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:49:19 crc kubenswrapper[4929]: I1122 07:49:19.215512 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhlwp\" (UniqueName: \"kubernetes.io/projected/f3dae3e9-bb7e-42e8-801e-b313a6385954-kube-api-access-bhlwp\") pod \"watcher-decision-engine-0\" (UID: \"f3dae3e9-bb7e-42e8-801e-b313a6385954\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:49:19 crc kubenswrapper[4929]: I1122 07:49:19.215576 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3dae3e9-bb7e-42e8-801e-b313a6385954-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"f3dae3e9-bb7e-42e8-801e-b313a6385954\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:49:19 crc kubenswrapper[4929]: I1122 07:49:19.215626 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3dae3e9-bb7e-42e8-801e-b313a6385954-logs\") pod \"watcher-decision-engine-0\" (UID: \"f3dae3e9-bb7e-42e8-801e-b313a6385954\") " 
pod="openstack/watcher-decision-engine-0" Nov 22 07:49:19 crc kubenswrapper[4929]: I1122 07:49:19.215790 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3dae3e9-bb7e-42e8-801e-b313a6385954-config-data\") pod \"watcher-decision-engine-0\" (UID: \"f3dae3e9-bb7e-42e8-801e-b313a6385954\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:49:19 crc kubenswrapper[4929]: I1122 07:49:19.215851 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/f3dae3e9-bb7e-42e8-801e-b313a6385954-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"f3dae3e9-bb7e-42e8-801e-b313a6385954\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:49:19 crc kubenswrapper[4929]: I1122 07:49:19.219522 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f3dae3e9-bb7e-42e8-801e-b313a6385954-logs\") pod \"watcher-decision-engine-0\" (UID: \"f3dae3e9-bb7e-42e8-801e-b313a6385954\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:49:19 crc kubenswrapper[4929]: I1122 07:49:19.221048 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/f3dae3e9-bb7e-42e8-801e-b313a6385954-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"f3dae3e9-bb7e-42e8-801e-b313a6385954\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:49:19 crc kubenswrapper[4929]: I1122 07:49:19.229281 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3dae3e9-bb7e-42e8-801e-b313a6385954-config-data\") pod \"watcher-decision-engine-0\" (UID: \"f3dae3e9-bb7e-42e8-801e-b313a6385954\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:49:19 crc kubenswrapper[4929]: I1122 07:49:19.235192 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3dae3e9-bb7e-42e8-801e-b313a6385954-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"f3dae3e9-bb7e-42e8-801e-b313a6385954\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:49:19 crc kubenswrapper[4929]: I1122 07:49:19.235569 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhlwp\" (UniqueName: \"kubernetes.io/projected/f3dae3e9-bb7e-42e8-801e-b313a6385954-kube-api-access-bhlwp\") pod \"watcher-decision-engine-0\" (UID: \"f3dae3e9-bb7e-42e8-801e-b313a6385954\") " pod="openstack/watcher-decision-engine-0" Nov 22 07:49:19 crc kubenswrapper[4929]: I1122 07:49:19.293633 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Nov 22 07:49:19 crc kubenswrapper[4929]: W1122 07:49:19.717979 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3dae3e9_bb7e_42e8_801e_b313a6385954.slice/crio-101352738e8aa49ac569750d68f6b0bbcea09e58bada6668b44cfa77a1cbf434 WatchSource:0}: Error finding container 101352738e8aa49ac569750d68f6b0bbcea09e58bada6668b44cfa77a1cbf434: Status 404 returned error can't find the container with id 101352738e8aa49ac569750d68f6b0bbcea09e58bada6668b44cfa77a1cbf434 Nov 22 07:49:19 crc kubenswrapper[4929]: I1122 07:49:19.726285 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Nov 22 07:49:19 crc kubenswrapper[4929]: I1122 07:49:19.919690 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"f3dae3e9-bb7e-42e8-801e-b313a6385954","Type":"ContainerStarted","Data":"101352738e8aa49ac569750d68f6b0bbcea09e58bada6668b44cfa77a1cbf434"} Nov 22 07:49:19 crc kubenswrapper[4929]: I1122 07:49:19.970865 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6b33a22-ad43-4a85-bcb7-8345455b171c" path="/var/lib/kubelet/pods/a6b33a22-ad43-4a85-bcb7-8345455b171c/volumes" Nov 22 07:49:20 crc kubenswrapper[4929]: I1122 07:49:20.934087 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"f3dae3e9-bb7e-42e8-801e-b313a6385954","Type":"ContainerStarted","Data":"b970e3bcc174eab26ff64710a2ff220f7715357139aa7d990d0d9ba72f41a503"} Nov 22 07:49:20 crc kubenswrapper[4929]: I1122 07:49:20.958875 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=2.958857877 podStartE2EDuration="2.958857877s" podCreationTimestamp="2025-11-22 07:49:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:49:20.955570816 +0000 UTC m=+2298.065024829" watchObservedRunningTime="2025-11-22 07:49:20.958857877 +0000 UTC m=+2298.068311890" Nov 22 07:49:24 crc kubenswrapper[4929]: I1122 07:49:24.977622 4929 generic.go:334] "Generic (PLEG): container finished" podID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerID="869fd83de686651927d4fb4989cae4e8bacfe371643555a8daee49f32a6a7d8d" exitCode=0 Nov 22 07:49:24 crc kubenswrapper[4929]: I1122 07:49:24.977665 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79","Type":"ContainerDied","Data":"869fd83de686651927d4fb4989cae4e8bacfe371643555a8daee49f32a6a7d8d"} Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.675650 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.749846 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-log-httpd\") pod \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.750325 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-config-data\") pod \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.750390 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-run-httpd\") pod \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.750469 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-combined-ca-bundle\") pod \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.750619 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-scripts\") pod \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.750703 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-ceilometer-tls-certs\") pod \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.750787 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-sg-core-conf-yaml\") pod \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.750872 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5qc82\" (UniqueName: \"kubernetes.io/projected/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-kube-api-access-5qc82\") pod \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\" (UID: \"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79\") " Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.751171 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" (UID: "38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.751235 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" (UID: "38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.751813 4929 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.752138 4929 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.757506 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-scripts" (OuterVolumeSpecName: "scripts") pod "38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" (UID: "38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.760639 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-kube-api-access-5qc82" (OuterVolumeSpecName: "kube-api-access-5qc82") pod "38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" (UID: "38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79"). InnerVolumeSpecName "kube-api-access-5qc82". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.786257 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" (UID: "38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.818340 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" (UID: "38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.839280 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" (UID: "38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.854554 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.854833 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.856600 4929 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.856742 4929 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.856848 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5qc82\" (UniqueName: \"kubernetes.io/projected/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-kube-api-access-5qc82\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.863686 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-config-data" (OuterVolumeSpecName: "config-data") pod "38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" (UID: "38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.961050 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.993719 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79","Type":"ContainerDied","Data":"825d73c23409b6a059da0989fa9bac5b7821f9ad616450cb3751bc7232bd6bd3"} Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.993800 4929 scope.go:117] "RemoveContainer" containerID="55cc4bdaf8ef8e422270c95dbe70b5a3ebb69fe5d9ee13a49d233ff73c70fd7f" Nov 22 07:49:25 crc kubenswrapper[4929]: I1122 07:49:25.994660 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.053858 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.067148 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.085031 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:49:26 crc kubenswrapper[4929]: E1122 07:49:26.085530 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerName="sg-core" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.085551 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerName="sg-core" Nov 22 07:49:26 crc kubenswrapper[4929]: E1122 07:49:26.085564 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerName="ceilometer-notification-agent" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.085572 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerName="ceilometer-notification-agent" Nov 22 07:49:26 crc kubenswrapper[4929]: E1122 07:49:26.085648 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerName="ceilometer-central-agent" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.085662 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerName="ceilometer-central-agent" Nov 22 07:49:26 crc kubenswrapper[4929]: E1122 07:49:26.085713 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerName="proxy-httpd" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.085722 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerName="proxy-httpd" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.086051 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerName="ceilometer-central-agent" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.086104 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerName="proxy-httpd" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.086125 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerName="ceilometer-notification-agent" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.086143 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" containerName="sg-core" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.087154 4929 scope.go:117] "RemoveContainer" containerID="59449bed598be38adf4735efc0db23bd23d40103ff8a6a5e1bc51930f02f9588" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.092964 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.095062 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.096060 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.097075 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.102172 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.139916 4929 scope.go:117] "RemoveContainer" containerID="645d16c011105be1eebe243090745ca87e7d96ab6084b18fc4df9510a5c2c034" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.164530 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-config-data\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.164606 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.164641 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a562fe7-c613-46c8-81c5-37795061c60b-run-httpd\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.164824 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.165043 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbmwl\" (UniqueName: \"kubernetes.io/projected/7a562fe7-c613-46c8-81c5-37795061c60b-kube-api-access-fbmwl\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.165273 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-scripts\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.165376 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a562fe7-c613-46c8-81c5-37795061c60b-log-httpd\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.165406 
4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.179960 4929 scope.go:117] "RemoveContainer" containerID="869fd83de686651927d4fb4989cae4e8bacfe371643555a8daee49f32a6a7d8d" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.266967 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a562fe7-c613-46c8-81c5-37795061c60b-log-httpd\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.267335 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.267373 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-config-data\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.267413 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.267440 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a562fe7-c613-46c8-81c5-37795061c60b-run-httpd\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.267463 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.267513 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbmwl\" (UniqueName: \"kubernetes.io/projected/7a562fe7-c613-46c8-81c5-37795061c60b-kube-api-access-fbmwl\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.267570 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-scripts\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.267837 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/7a562fe7-c613-46c8-81c5-37795061c60b-run-httpd\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.268204 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a562fe7-c613-46c8-81c5-37795061c60b-log-httpd\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.271870 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.272675 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-config-data\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.275966 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.280736 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-scripts\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.508439 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbmwl\" (UniqueName: \"kubernetes.io/projected/7a562fe7-c613-46c8-81c5-37795061c60b-kube-api-access-fbmwl\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.508444 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " pod="openstack/ceilometer-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.536147 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.536201 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.568659 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.577640 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 22 07:49:26 crc kubenswrapper[4929]: I1122 07:49:26.721654 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:49:27 crc kubenswrapper[4929]: I1122 07:49:27.010008 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 22 07:49:27 crc kubenswrapper[4929]: I1122 07:49:27.020715 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 22 07:49:27 crc kubenswrapper[4929]: I1122 07:49:27.177239 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:49:27 crc kubenswrapper[4929]: W1122 07:49:27.185101 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7a562fe7_c613_46c8_81c5_37795061c60b.slice/crio-e82d81ca244fc9e3390a9ac2480c149ce427a2684cb13753ef903eb2da3ea618 WatchSource:0}: Error finding container e82d81ca244fc9e3390a9ac2480c149ce427a2684cb13753ef903eb2da3ea618: Status 404 returned error can't find the container with id e82d81ca244fc9e3390a9ac2480c149ce427a2684cb13753ef903eb2da3ea618 Nov 22 07:49:27 crc kubenswrapper[4929]: I1122 07:49:27.968570 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79" path="/var/lib/kubelet/pods/38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79/volumes" Nov 22 07:49:28 crc kubenswrapper[4929]: I1122 07:49:28.022203 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a562fe7-c613-46c8-81c5-37795061c60b","Type":"ContainerStarted","Data":"e82d81ca244fc9e3390a9ac2480c149ce427a2684cb13753ef903eb2da3ea618"} Nov 22 07:49:28 crc kubenswrapper[4929]: I1122 07:49:28.899531 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 22 07:49:28 crc kubenswrapper[4929]: I1122 07:49:28.902487 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 22 07:49:29 crc kubenswrapper[4929]: I1122 07:49:29.295253 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Nov 22 07:49:29 crc kubenswrapper[4929]: I1122 07:49:29.326692 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Nov 22 07:49:30 crc kubenswrapper[4929]: I1122 07:49:30.045093 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a562fe7-c613-46c8-81c5-37795061c60b","Type":"ContainerStarted","Data":"080cb2e25a279daf203e4da85aa709a65e6cff2f89d6e42a1c9c7b5b61e65730"} Nov 22 07:49:30 crc kubenswrapper[4929]: I1122 07:49:30.046437 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Nov 22 07:49:30 crc kubenswrapper[4929]: I1122 07:49:30.074858 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Nov 22 07:49:31 crc kubenswrapper[4929]: I1122 07:49:31.109643 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-sq949"] Nov 22 07:49:31 crc kubenswrapper[4929]: I1122 07:49:31.113306 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sq949" Nov 22 07:49:31 crc kubenswrapper[4929]: I1122 07:49:31.120686 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sq949"] Nov 22 07:49:31 crc kubenswrapper[4929]: I1122 07:49:31.275145 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppbvm\" (UniqueName: \"kubernetes.io/projected/680b78e9-161f-47a3-8fe8-39fc6602b0f7-kube-api-access-ppbvm\") pod \"redhat-marketplace-sq949\" (UID: \"680b78e9-161f-47a3-8fe8-39fc6602b0f7\") " pod="openshift-marketplace/redhat-marketplace-sq949" Nov 22 07:49:31 crc kubenswrapper[4929]: I1122 07:49:31.275278 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/680b78e9-161f-47a3-8fe8-39fc6602b0f7-utilities\") pod \"redhat-marketplace-sq949\" (UID: \"680b78e9-161f-47a3-8fe8-39fc6602b0f7\") " pod="openshift-marketplace/redhat-marketplace-sq949" Nov 22 07:49:31 crc kubenswrapper[4929]: I1122 07:49:31.275310 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/680b78e9-161f-47a3-8fe8-39fc6602b0f7-catalog-content\") pod \"redhat-marketplace-sq949\" (UID: \"680b78e9-161f-47a3-8fe8-39fc6602b0f7\") " pod="openshift-marketplace/redhat-marketplace-sq949" Nov 22 07:49:31 crc kubenswrapper[4929]: I1122 07:49:31.377228 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/680b78e9-161f-47a3-8fe8-39fc6602b0f7-utilities\") pod \"redhat-marketplace-sq949\" (UID: \"680b78e9-161f-47a3-8fe8-39fc6602b0f7\") " pod="openshift-marketplace/redhat-marketplace-sq949" Nov 22 07:49:31 crc kubenswrapper[4929]: I1122 07:49:31.377281 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/680b78e9-161f-47a3-8fe8-39fc6602b0f7-catalog-content\") pod \"redhat-marketplace-sq949\" (UID: \"680b78e9-161f-47a3-8fe8-39fc6602b0f7\") " pod="openshift-marketplace/redhat-marketplace-sq949" Nov 22 07:49:31 crc kubenswrapper[4929]: I1122 07:49:31.377396 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppbvm\" (UniqueName: \"kubernetes.io/projected/680b78e9-161f-47a3-8fe8-39fc6602b0f7-kube-api-access-ppbvm\") pod \"redhat-marketplace-sq949\" (UID: \"680b78e9-161f-47a3-8fe8-39fc6602b0f7\") " pod="openshift-marketplace/redhat-marketplace-sq949" Nov 22 07:49:31 crc kubenswrapper[4929]: I1122 07:49:31.377752 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/680b78e9-161f-47a3-8fe8-39fc6602b0f7-catalog-content\") pod \"redhat-marketplace-sq949\" (UID: \"680b78e9-161f-47a3-8fe8-39fc6602b0f7\") " pod="openshift-marketplace/redhat-marketplace-sq949" Nov 22 07:49:31 crc kubenswrapper[4929]: I1122 07:49:31.377975 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/680b78e9-161f-47a3-8fe8-39fc6602b0f7-utilities\") pod \"redhat-marketplace-sq949\" (UID: \"680b78e9-161f-47a3-8fe8-39fc6602b0f7\") " pod="openshift-marketplace/redhat-marketplace-sq949" Nov 22 07:49:31 crc kubenswrapper[4929]: I1122 07:49:31.398871 4929 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-ppbvm\" (UniqueName: \"kubernetes.io/projected/680b78e9-161f-47a3-8fe8-39fc6602b0f7-kube-api-access-ppbvm\") pod \"redhat-marketplace-sq949\" (UID: \"680b78e9-161f-47a3-8fe8-39fc6602b0f7\") " pod="openshift-marketplace/redhat-marketplace-sq949" Nov 22 07:49:31 crc kubenswrapper[4929]: I1122 07:49:31.460190 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sq949" Nov 22 07:49:31 crc kubenswrapper[4929]: I1122 07:49:31.712559 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7wb2m"] Nov 22 07:49:31 crc kubenswrapper[4929]: I1122 07:49:31.714987 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7wb2m" Nov 22 07:49:31 crc kubenswrapper[4929]: I1122 07:49:31.740276 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7wb2m"] Nov 22 07:49:31 crc kubenswrapper[4929]: I1122 07:49:31.896318 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd35db38-a8f4-40d9-805b-af72be930c0a-catalog-content\") pod \"redhat-operators-7wb2m\" (UID: \"dd35db38-a8f4-40d9-805b-af72be930c0a\") " pod="openshift-marketplace/redhat-operators-7wb2m" Nov 22 07:49:31 crc kubenswrapper[4929]: I1122 07:49:31.897340 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd35db38-a8f4-40d9-805b-af72be930c0a-utilities\") pod \"redhat-operators-7wb2m\" (UID: \"dd35db38-a8f4-40d9-805b-af72be930c0a\") " pod="openshift-marketplace/redhat-operators-7wb2m" Nov 22 07:49:31 crc kubenswrapper[4929]: I1122 07:49:31.897541 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqkhp\" (UniqueName: \"kubernetes.io/projected/dd35db38-a8f4-40d9-805b-af72be930c0a-kube-api-access-fqkhp\") pod \"redhat-operators-7wb2m\" (UID: \"dd35db38-a8f4-40d9-805b-af72be930c0a\") " pod="openshift-marketplace/redhat-operators-7wb2m" Nov 22 07:49:32 crc kubenswrapper[4929]: I1122 07:49:32.000405 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqkhp\" (UniqueName: \"kubernetes.io/projected/dd35db38-a8f4-40d9-805b-af72be930c0a-kube-api-access-fqkhp\") pod \"redhat-operators-7wb2m\" (UID: \"dd35db38-a8f4-40d9-805b-af72be930c0a\") " pod="openshift-marketplace/redhat-operators-7wb2m" Nov 22 07:49:32 crc kubenswrapper[4929]: I1122 07:49:32.000512 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd35db38-a8f4-40d9-805b-af72be930c0a-catalog-content\") pod \"redhat-operators-7wb2m\" (UID: \"dd35db38-a8f4-40d9-805b-af72be930c0a\") " pod="openshift-marketplace/redhat-operators-7wb2m" Nov 22 07:49:32 crc kubenswrapper[4929]: I1122 07:49:32.000581 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd35db38-a8f4-40d9-805b-af72be930c0a-utilities\") pod \"redhat-operators-7wb2m\" (UID: \"dd35db38-a8f4-40d9-805b-af72be930c0a\") " pod="openshift-marketplace/redhat-operators-7wb2m" Nov 22 07:49:32 crc kubenswrapper[4929]: I1122 07:49:32.001119 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd35db38-a8f4-40d9-805b-af72be930c0a-utilities\") pod \"redhat-operators-7wb2m\" (UID: \"dd35db38-a8f4-40d9-805b-af72be930c0a\") " pod="openshift-marketplace/redhat-operators-7wb2m" Nov 22 07:49:32 crc kubenswrapper[4929]: I1122 07:49:32.001745 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd35db38-a8f4-40d9-805b-af72be930c0a-catalog-content\") pod \"redhat-operators-7wb2m\" (UID: \"dd35db38-a8f4-40d9-805b-af72be930c0a\") " pod="openshift-marketplace/redhat-operators-7wb2m" Nov 22 07:49:32 crc kubenswrapper[4929]: I1122 07:49:32.023122 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqkhp\" (UniqueName: \"kubernetes.io/projected/dd35db38-a8f4-40d9-805b-af72be930c0a-kube-api-access-fqkhp\") pod \"redhat-operators-7wb2m\" (UID: \"dd35db38-a8f4-40d9-805b-af72be930c0a\") " pod="openshift-marketplace/redhat-operators-7wb2m" Nov 22 07:49:32 crc kubenswrapper[4929]: I1122 07:49:32.129141 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7wb2m" Nov 22 07:49:32 crc kubenswrapper[4929]: I1122 07:49:32.219171 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sq949"] Nov 22 07:49:32 crc kubenswrapper[4929]: W1122 07:49:32.229425 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod680b78e9_161f_47a3_8fe8_39fc6602b0f7.slice/crio-e402a959340a1f264998ec3dbf40cbc5789069b54ba674947457d87b114170a7 WatchSource:0}: Error finding container e402a959340a1f264998ec3dbf40cbc5789069b54ba674947457d87b114170a7: Status 404 returned error can't find the container with id e402a959340a1f264998ec3dbf40cbc5789069b54ba674947457d87b114170a7 Nov 22 07:49:32 crc kubenswrapper[4929]: I1122 07:49:32.631032 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7wb2m"] Nov 22 07:49:32 crc kubenswrapper[4929]: W1122 07:49:32.640121 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddd35db38_a8f4_40d9_805b_af72be930c0a.slice/crio-8177c37a94266df4e7da309f1f3f1f2f4ed02de1ddf4bc1efd1b29cab7b752d5 WatchSource:0}: Error finding container 8177c37a94266df4e7da309f1f3f1f2f4ed02de1ddf4bc1efd1b29cab7b752d5: Status 404 returned error can't find the container with id 8177c37a94266df4e7da309f1f3f1f2f4ed02de1ddf4bc1efd1b29cab7b752d5 Nov 22 07:49:33 crc kubenswrapper[4929]: I1122 07:49:33.073923 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a562fe7-c613-46c8-81c5-37795061c60b","Type":"ContainerStarted","Data":"a92fab0f52df75425ebdd4084e8dae90df47034e9f7da6a599d710d5f9a0651f"} Nov 22 07:49:33 crc kubenswrapper[4929]: I1122 07:49:33.074872 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7wb2m" event={"ID":"dd35db38-a8f4-40d9-805b-af72be930c0a","Type":"ContainerStarted","Data":"8177c37a94266df4e7da309f1f3f1f2f4ed02de1ddf4bc1efd1b29cab7b752d5"} Nov 22 07:49:33 crc kubenswrapper[4929]: I1122 07:49:33.076129 4929 generic.go:334] "Generic (PLEG): container finished" podID="680b78e9-161f-47a3-8fe8-39fc6602b0f7" containerID="ed8aef95546e74d357af6356c52f0dc6f9dab6eeced8d1b329f82e3c3da590e7" exitCode=0 Nov 22 07:49:33 crc kubenswrapper[4929]: I1122 
Nov 22 07:49:33 crc kubenswrapper[4929]: I1122 07:49:33.076179 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sq949" event={"ID":"680b78e9-161f-47a3-8fe8-39fc6602b0f7","Type":"ContainerStarted","Data":"e402a959340a1f264998ec3dbf40cbc5789069b54ba674947457d87b114170a7"}
Nov 22 07:49:33 crc kubenswrapper[4929]: I1122 07:49:33.506043 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-75bj8"]
Nov 22 07:49:33 crc kubenswrapper[4929]: I1122 07:49:33.509962 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-75bj8"
Nov 22 07:49:33 crc kubenswrapper[4929]: I1122 07:49:33.526052 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-75bj8"]
Nov 22 07:49:33 crc kubenswrapper[4929]: I1122 07:49:33.630941 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bcb575a-cde1-458f-85d0-17ab7cf60522-catalog-content\") pod \"certified-operators-75bj8\" (UID: \"9bcb575a-cde1-458f-85d0-17ab7cf60522\") " pod="openshift-marketplace/certified-operators-75bj8"
Nov 22 07:49:33 crc kubenswrapper[4929]: I1122 07:49:33.631057 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bcb575a-cde1-458f-85d0-17ab7cf60522-utilities\") pod \"certified-operators-75bj8\" (UID: \"9bcb575a-cde1-458f-85d0-17ab7cf60522\") " pod="openshift-marketplace/certified-operators-75bj8"
Nov 22 07:49:33 crc kubenswrapper[4929]: I1122 07:49:33.631094 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rngqq\" (UniqueName: \"kubernetes.io/projected/9bcb575a-cde1-458f-85d0-17ab7cf60522-kube-api-access-rngqq\") pod \"certified-operators-75bj8\" (UID: \"9bcb575a-cde1-458f-85d0-17ab7cf60522\") " pod="openshift-marketplace/certified-operators-75bj8"
Nov 22 07:49:33 crc kubenswrapper[4929]: I1122 07:49:33.733279 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bcb575a-cde1-458f-85d0-17ab7cf60522-catalog-content\") pod \"certified-operators-75bj8\" (UID: \"9bcb575a-cde1-458f-85d0-17ab7cf60522\") " pod="openshift-marketplace/certified-operators-75bj8"
Nov 22 07:49:33 crc kubenswrapper[4929]: I1122 07:49:33.733368 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bcb575a-cde1-458f-85d0-17ab7cf60522-utilities\") pod \"certified-operators-75bj8\" (UID: \"9bcb575a-cde1-458f-85d0-17ab7cf60522\") " pod="openshift-marketplace/certified-operators-75bj8"
Nov 22 07:49:33 crc kubenswrapper[4929]: I1122 07:49:33.733401 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rngqq\" (UniqueName: \"kubernetes.io/projected/9bcb575a-cde1-458f-85d0-17ab7cf60522-kube-api-access-rngqq\") pod \"certified-operators-75bj8\" (UID: \"9bcb575a-cde1-458f-85d0-17ab7cf60522\") " pod="openshift-marketplace/certified-operators-75bj8"
Nov 22 07:49:33 crc kubenswrapper[4929]: I1122 07:49:33.734318 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bcb575a-cde1-458f-85d0-17ab7cf60522-catalog-content\") pod \"certified-operators-75bj8\" (UID: \"9bcb575a-cde1-458f-85d0-17ab7cf60522\") " pod="openshift-marketplace/certified-operators-75bj8"
Nov 22 07:49:33 crc kubenswrapper[4929]: I1122 07:49:33.734754 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bcb575a-cde1-458f-85d0-17ab7cf60522-utilities\") pod \"certified-operators-75bj8\" (UID: \"9bcb575a-cde1-458f-85d0-17ab7cf60522\") " pod="openshift-marketplace/certified-operators-75bj8"
Nov 22 07:49:33 crc kubenswrapper[4929]: I1122 07:49:33.752445 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rngqq\" (UniqueName: \"kubernetes.io/projected/9bcb575a-cde1-458f-85d0-17ab7cf60522-kube-api-access-rngqq\") pod \"certified-operators-75bj8\" (UID: \"9bcb575a-cde1-458f-85d0-17ab7cf60522\") " pod="openshift-marketplace/certified-operators-75bj8"
Nov 22 07:49:33 crc kubenswrapper[4929]: I1122 07:49:33.835808 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-75bj8"
Nov 22 07:49:34 crc kubenswrapper[4929]: I1122 07:49:34.105585 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qmh7p"]
Nov 22 07:49:34 crc kubenswrapper[4929]: I1122 07:49:34.108426 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qmh7p"
Nov 22 07:49:34 crc kubenswrapper[4929]: I1122 07:49:34.116382 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qmh7p"]
Nov 22 07:49:34 crc kubenswrapper[4929]: I1122 07:49:34.242765 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb865101-8d1a-4930-b37d-73df5d40e618-utilities\") pod \"community-operators-qmh7p\" (UID: \"cb865101-8d1a-4930-b37d-73df5d40e618\") " pod="openshift-marketplace/community-operators-qmh7p"
Nov 22 07:49:34 crc kubenswrapper[4929]: I1122 07:49:34.242874 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb865101-8d1a-4930-b37d-73df5d40e618-catalog-content\") pod \"community-operators-qmh7p\" (UID: \"cb865101-8d1a-4930-b37d-73df5d40e618\") " pod="openshift-marketplace/community-operators-qmh7p"
Nov 22 07:49:34 crc kubenswrapper[4929]: I1122 07:49:34.242949 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5tmfw\" (UniqueName: \"kubernetes.io/projected/cb865101-8d1a-4930-b37d-73df5d40e618-kube-api-access-5tmfw\") pod \"community-operators-qmh7p\" (UID: \"cb865101-8d1a-4930-b37d-73df5d40e618\") " pod="openshift-marketplace/community-operators-qmh7p"
Nov 22 07:49:34 crc kubenswrapper[4929]: I1122 07:49:34.344471 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb865101-8d1a-4930-b37d-73df5d40e618-catalog-content\") pod \"community-operators-qmh7p\" (UID: \"cb865101-8d1a-4930-b37d-73df5d40e618\") " pod="openshift-marketplace/community-operators-qmh7p"
Nov 22 07:49:34 crc kubenswrapper[4929]: I1122 07:49:34.344584 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5tmfw\" (UniqueName: \"kubernetes.io/projected/cb865101-8d1a-4930-b37d-73df5d40e618-kube-api-access-5tmfw\") pod \"community-operators-qmh7p\" (UID: \"cb865101-8d1a-4930-b37d-73df5d40e618\") " pod="openshift-marketplace/community-operators-qmh7p"
Nov 22 07:49:34 crc kubenswrapper[4929]: I1122 07:49:34.344685 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb865101-8d1a-4930-b37d-73df5d40e618-utilities\") pod \"community-operators-qmh7p\" (UID: \"cb865101-8d1a-4930-b37d-73df5d40e618\") " pod="openshift-marketplace/community-operators-qmh7p"
Nov 22 07:49:34 crc kubenswrapper[4929]: I1122 07:49:34.345101 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb865101-8d1a-4930-b37d-73df5d40e618-utilities\") pod \"community-operators-qmh7p\" (UID: \"cb865101-8d1a-4930-b37d-73df5d40e618\") " pod="openshift-marketplace/community-operators-qmh7p"
Nov 22 07:49:34 crc kubenswrapper[4929]: I1122 07:49:34.345131 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb865101-8d1a-4930-b37d-73df5d40e618-catalog-content\") pod \"community-operators-qmh7p\" (UID: \"cb865101-8d1a-4930-b37d-73df5d40e618\") " pod="openshift-marketplace/community-operators-qmh7p"
Nov 22 07:49:34 crc kubenswrapper[4929]: I1122 07:49:34.416409 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5tmfw\" (UniqueName: \"kubernetes.io/projected/cb865101-8d1a-4930-b37d-73df5d40e618-kube-api-access-5tmfw\") pod \"community-operators-qmh7p\" (UID: \"cb865101-8d1a-4930-b37d-73df5d40e618\") " pod="openshift-marketplace/community-operators-qmh7p"
Nov 22 07:49:34 crc kubenswrapper[4929]: I1122 07:49:34.427818 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qmh7p"
Nov 22 07:49:34 crc kubenswrapper[4929]: I1122 07:49:34.570795 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-75bj8"]
Nov 22 07:49:35 crc kubenswrapper[4929]: I1122 07:49:35.101516 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75bj8" event={"ID":"9bcb575a-cde1-458f-85d0-17ab7cf60522","Type":"ContainerStarted","Data":"934eb2ab2cbd2c1be8a325aa9c4efb31cd7ae477155e7793ee42d2f6eac99052"}
Nov 22 07:49:35 crc kubenswrapper[4929]: I1122 07:49:35.103875 4929 generic.go:334] "Generic (PLEG): container finished" podID="dd35db38-a8f4-40d9-805b-af72be930c0a" containerID="2e83d6dec2408aa04eb384d66eeaf0479989571b0ae49251ee1e11e599b6d690" exitCode=0
Nov 22 07:49:35 crc kubenswrapper[4929]: I1122 07:49:35.103901 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7wb2m" event={"ID":"dd35db38-a8f4-40d9-805b-af72be930c0a","Type":"ContainerDied","Data":"2e83d6dec2408aa04eb384d66eeaf0479989571b0ae49251ee1e11e599b6d690"}
Nov 22 07:49:35 crc kubenswrapper[4929]: W1122 07:49:35.272868 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcb865101_8d1a_4930_b37d_73df5d40e618.slice/crio-da6f8d4b230cc1c076cb55b5ef67f13521dd3c4cfa7fc60cb98e4ba183afd94b WatchSource:0}: Error finding container da6f8d4b230cc1c076cb55b5ef67f13521dd3c4cfa7fc60cb98e4ba183afd94b: Status 404 returned error can't find the container with id da6f8d4b230cc1c076cb55b5ef67f13521dd3c4cfa7fc60cb98e4ba183afd94b
Nov 22 07:49:35 crc kubenswrapper[4929]: I1122 07:49:35.280172 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qmh7p"]
Nov 22 07:49:36 crc kubenswrapper[4929]: I1122 07:49:36.116119 4929 generic.go:334] "Generic (PLEG): container finished" podID="9bcb575a-cde1-458f-85d0-17ab7cf60522" containerID="fdfc31f4e597174b7099d2e30a333ca3310673299b08595790d44c4cf139001c" exitCode=0
Nov 22 07:49:36 crc kubenswrapper[4929]: I1122 07:49:36.116266 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75bj8" event={"ID":"9bcb575a-cde1-458f-85d0-17ab7cf60522","Type":"ContainerDied","Data":"fdfc31f4e597174b7099d2e30a333ca3310673299b08595790d44c4cf139001c"}
Nov 22 07:49:36 crc kubenswrapper[4929]: I1122 07:49:36.118737 4929 generic.go:334] "Generic (PLEG): container finished" podID="cb865101-8d1a-4930-b37d-73df5d40e618" containerID="b216c678f23ca3ab2f3c73e86d2ab6c9a9d27b59e079d95d72bf41a0a63175cc" exitCode=0
Nov 22 07:49:36 crc kubenswrapper[4929]: I1122 07:49:36.118774 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qmh7p" event={"ID":"cb865101-8d1a-4930-b37d-73df5d40e618","Type":"ContainerDied","Data":"b216c678f23ca3ab2f3c73e86d2ab6c9a9d27b59e079d95d72bf41a0a63175cc"}
Nov 22 07:49:36 crc kubenswrapper[4929]: I1122 07:49:36.118799 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qmh7p" event={"ID":"cb865101-8d1a-4930-b37d-73df5d40e618","Type":"ContainerStarted","Data":"da6f8d4b230cc1c076cb55b5ef67f13521dd3c4cfa7fc60cb98e4ba183afd94b"}
Nov 22 07:49:38 crc kubenswrapper[4929]: I1122 07:49:38.144065 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a562fe7-c613-46c8-81c5-37795061c60b","Type":"ContainerStarted","Data":"6cd568d3c19d081d03ca23ea9f029dc2bf93d2ca641f62ecfdff0851876da25e"}
event={"ID":"7a562fe7-c613-46c8-81c5-37795061c60b","Type":"ContainerStarted","Data":"6cd568d3c19d081d03ca23ea9f029dc2bf93d2ca641f62ecfdff0851876da25e"} Nov 22 07:49:42 crc kubenswrapper[4929]: I1122 07:49:42.187191 4929 generic.go:334] "Generic (PLEG): container finished" podID="680b78e9-161f-47a3-8fe8-39fc6602b0f7" containerID="561660201fd6b21e088e8e5583de7e6f59268d27fc8a1f1a0cfe988441824098" exitCode=0 Nov 22 07:49:42 crc kubenswrapper[4929]: I1122 07:49:42.187248 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sq949" event={"ID":"680b78e9-161f-47a3-8fe8-39fc6602b0f7","Type":"ContainerDied","Data":"561660201fd6b21e088e8e5583de7e6f59268d27fc8a1f1a0cfe988441824098"} Nov 22 07:49:42 crc kubenswrapper[4929]: I1122 07:49:42.191700 4929 generic.go:334] "Generic (PLEG): container finished" podID="9bcb575a-cde1-458f-85d0-17ab7cf60522" containerID="32b95ffdc9ac1640e481d0df0e7dabfef2c06b4065c1b1b3ba610bc8b2daab8f" exitCode=0 Nov 22 07:49:42 crc kubenswrapper[4929]: I1122 07:49:42.191759 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75bj8" event={"ID":"9bcb575a-cde1-458f-85d0-17ab7cf60522","Type":"ContainerDied","Data":"32b95ffdc9ac1640e481d0df0e7dabfef2c06b4065c1b1b3ba610bc8b2daab8f"} Nov 22 07:49:42 crc kubenswrapper[4929]: I1122 07:49:42.194973 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qmh7p" event={"ID":"cb865101-8d1a-4930-b37d-73df5d40e618","Type":"ContainerStarted","Data":"241c1378d112747f8ab81be2c869fb89f7202365181fd51c14d0154f376ac649"} Nov 22 07:49:42 crc kubenswrapper[4929]: I1122 07:49:42.198638 4929 generic.go:334] "Generic (PLEG): container finished" podID="dd35db38-a8f4-40d9-805b-af72be930c0a" containerID="63f37f4b8a7e647d4203715b54de799b69739ba20f94f11798f315bfbd2e5aee" exitCode=0 Nov 22 07:49:42 crc kubenswrapper[4929]: I1122 07:49:42.198690 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7wb2m" event={"ID":"dd35db38-a8f4-40d9-805b-af72be930c0a","Type":"ContainerDied","Data":"63f37f4b8a7e647d4203715b54de799b69739ba20f94f11798f315bfbd2e5aee"} Nov 22 07:49:55 crc kubenswrapper[4929]: I1122 07:49:55.013576 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="143bdc31-dc3f-4c3f-81e2-9e4314ba960d" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.203:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 07:49:56 crc kubenswrapper[4929]: I1122 07:49:56.023056 4929 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod38e1b0fb-8aaf-48b9-9dc1-9c1c80d2bd79] : Timed out while waiting for systemd to remove kubepods-besteffort-pod38e1b0fb_8aaf_48b9_9dc1_9c1c80d2bd79.slice" Nov 22 07:50:07 crc kubenswrapper[4929]: I1122 07:50:07.014560 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="143bdc31-dc3f-4c3f-81e2-9e4314ba960d" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.203:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 07:50:10 crc kubenswrapper[4929]: I1122 07:50:10.532475 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"7a562fe7-c613-46c8-81c5-37795061c60b","Type":"ContainerStarted","Data":"329efd441e443fb2ef0c06f7ff68c7edd6bc9f2c35c907c14a97c01cf45a77d6"} Nov 22 07:50:10 crc kubenswrapper[4929]: I1122 07:50:10.536576 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75bj8" event={"ID":"9bcb575a-cde1-458f-85d0-17ab7cf60522","Type":"ContainerStarted","Data":"3ad080c84816b41fcbfa80215e45b827e45fb0c1f70671e98537bf97abda8226"} Nov 22 07:50:10 crc kubenswrapper[4929]: I1122 07:50:10.538654 4929 generic.go:334] "Generic (PLEG): container finished" podID="cb865101-8d1a-4930-b37d-73df5d40e618" containerID="241c1378d112747f8ab81be2c869fb89f7202365181fd51c14d0154f376ac649" exitCode=0 Nov 22 07:50:10 crc kubenswrapper[4929]: I1122 07:50:10.538685 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qmh7p" event={"ID":"cb865101-8d1a-4930-b37d-73df5d40e618","Type":"ContainerDied","Data":"241c1378d112747f8ab81be2c869fb89f7202365181fd51c14d0154f376ac649"} Nov 22 07:50:10 crc kubenswrapper[4929]: I1122 07:50:10.541003 4929 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 07:50:10 crc kubenswrapper[4929]: I1122 07:50:10.546460 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7wb2m" event={"ID":"dd35db38-a8f4-40d9-805b-af72be930c0a","Type":"ContainerStarted","Data":"18a3c5fa6b04f10fbe1651823d70b6d08629a4b964579679211fb496139d257d"} Nov 22 07:50:10 crc kubenswrapper[4929]: I1122 07:50:10.551258 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sq949" event={"ID":"680b78e9-161f-47a3-8fe8-39fc6602b0f7","Type":"ContainerStarted","Data":"05d89016841dbe3c5ab5031215bead7cec49f477e73ca138789611724d8f9edd"} Nov 22 07:50:10 crc kubenswrapper[4929]: I1122 07:50:10.583905 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-sq949" podStartSLOduration=3.550779928 podStartE2EDuration="39.583880612s" podCreationTimestamp="2025-11-22 07:49:31 +0000 UTC" firstStartedPulling="2025-11-22 07:49:33.078371911 +0000 UTC m=+2310.187825924" lastFinishedPulling="2025-11-22 07:50:09.111472595 +0000 UTC m=+2346.220926608" observedRunningTime="2025-11-22 07:50:10.581052381 +0000 UTC m=+2347.690506404" watchObservedRunningTime="2025-11-22 07:50:10.583880612 +0000 UTC m=+2347.693334625" Nov 22 07:50:11 crc kubenswrapper[4929]: I1122 07:50:11.461509 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-sq949" Nov 22 07:50:11 crc kubenswrapper[4929]: I1122 07:50:11.462071 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-sq949" Nov 22 07:50:11 crc kubenswrapper[4929]: I1122 07:50:11.599065 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=16.816266081 podStartE2EDuration="45.599038798s" podCreationTimestamp="2025-11-22 07:49:26 +0000 UTC" firstStartedPulling="2025-11-22 07:49:27.188554722 +0000 UTC m=+2304.298008785" lastFinishedPulling="2025-11-22 07:49:55.971327489 +0000 UTC m=+2333.080781502" observedRunningTime="2025-11-22 07:50:11.583192505 +0000 UTC m=+2348.692646558" watchObservedRunningTime="2025-11-22 07:50:11.599038798 +0000 UTC m=+2348.708492811" Nov 22 07:50:11 crc kubenswrapper[4929]: I1122 07:50:11.620133 4929 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7wb2m" podStartSLOduration=6.614171993 podStartE2EDuration="40.620111291s" podCreationTimestamp="2025-11-22 07:49:31 +0000 UTC" firstStartedPulling="2025-11-22 07:49:35.106208134 +0000 UTC m=+2312.215662147" lastFinishedPulling="2025-11-22 07:50:09.112147432 +0000 UTC m=+2346.221601445" observedRunningTime="2025-11-22 07:50:11.603469908 +0000 UTC m=+2348.712923921" watchObservedRunningTime="2025-11-22 07:50:11.620111291 +0000 UTC m=+2348.729565304" Nov 22 07:50:11 crc kubenswrapper[4929]: I1122 07:50:11.630494 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-75bj8" podStartSLOduration=8.677725602 podStartE2EDuration="38.630471528s" podCreationTimestamp="2025-11-22 07:49:33 +0000 UTC" firstStartedPulling="2025-11-22 07:49:36.117689599 +0000 UTC m=+2313.227143612" lastFinishedPulling="2025-11-22 07:50:06.070435535 +0000 UTC m=+2343.179889538" observedRunningTime="2025-11-22 07:50:11.622975752 +0000 UTC m=+2348.732429765" watchObservedRunningTime="2025-11-22 07:50:11.630471528 +0000 UTC m=+2348.739925541" Nov 22 07:50:12 crc kubenswrapper[4929]: I1122 07:50:12.129357 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7wb2m" Nov 22 07:50:12 crc kubenswrapper[4929]: I1122 07:50:12.129435 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7wb2m" Nov 22 07:50:12 crc kubenswrapper[4929]: I1122 07:50:12.542081 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-sq949" podUID="680b78e9-161f-47a3-8fe8-39fc6602b0f7" containerName="registry-server" probeResult="failure" output=< Nov 22 07:50:12 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 07:50:12 crc kubenswrapper[4929]: > Nov 22 07:50:13 crc kubenswrapper[4929]: I1122 07:50:13.455974 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7wb2m" podUID="dd35db38-a8f4-40d9-805b-af72be930c0a" containerName="registry-server" probeResult="failure" output=< Nov 22 07:50:13 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 07:50:13 crc kubenswrapper[4929]: > Nov 22 07:50:13 crc kubenswrapper[4929]: I1122 07:50:13.836280 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-75bj8" Nov 22 07:50:13 crc kubenswrapper[4929]: I1122 07:50:13.836353 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-75bj8" Nov 22 07:50:13 crc kubenswrapper[4929]: I1122 07:50:13.885904 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-75bj8" Nov 22 07:50:14 crc kubenswrapper[4929]: I1122 07:50:14.632959 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-75bj8" Nov 22 07:50:15 crc kubenswrapper[4929]: I1122 07:50:15.295627 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-75bj8"] Nov 22 07:50:16 crc kubenswrapper[4929]: I1122 07:50:16.607571 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-75bj8" 
podUID="9bcb575a-cde1-458f-85d0-17ab7cf60522" containerName="registry-server" containerID="cri-o://3ad080c84816b41fcbfa80215e45b827e45fb0c1f70671e98537bf97abda8226" gracePeriod=2 Nov 22 07:50:18 crc kubenswrapper[4929]: I1122 07:50:18.065971 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-5t55t"] Nov 22 07:50:18 crc kubenswrapper[4929]: I1122 07:50:18.079143 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-b7aa-account-create-6jf49"] Nov 22 07:50:18 crc kubenswrapper[4929]: I1122 07:50:18.090364 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-e669-account-create-phsc5"] Nov 22 07:50:18 crc kubenswrapper[4929]: I1122 07:50:18.100740 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-f7jtj"] Nov 22 07:50:18 crc kubenswrapper[4929]: I1122 07:50:18.108578 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-f7jtj"] Nov 22 07:50:18 crc kubenswrapper[4929]: I1122 07:50:18.116768 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-b7aa-account-create-6jf49"] Nov 22 07:50:18 crc kubenswrapper[4929]: I1122 07:50:18.124957 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-5t55t"] Nov 22 07:50:18 crc kubenswrapper[4929]: I1122 07:50:18.132646 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-e669-account-create-phsc5"] Nov 22 07:50:18 crc kubenswrapper[4929]: I1122 07:50:18.636677 4929 generic.go:334] "Generic (PLEG): container finished" podID="9bcb575a-cde1-458f-85d0-17ab7cf60522" containerID="3ad080c84816b41fcbfa80215e45b827e45fb0c1f70671e98537bf97abda8226" exitCode=0 Nov 22 07:50:18 crc kubenswrapper[4929]: I1122 07:50:18.636746 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75bj8" event={"ID":"9bcb575a-cde1-458f-85d0-17ab7cf60522","Type":"ContainerDied","Data":"3ad080c84816b41fcbfa80215e45b827e45fb0c1f70671e98537bf97abda8226"} Nov 22 07:50:19 crc kubenswrapper[4929]: I1122 07:50:19.961748 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49469bee-1b64-40ca-a873-bf08fc285efa" path="/var/lib/kubelet/pods/49469bee-1b64-40ca-a873-bf08fc285efa/volumes" Nov 22 07:50:19 crc kubenswrapper[4929]: I1122 07:50:19.962909 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="757783e8-e867-426d-94e0-952fd124f60c" path="/var/lib/kubelet/pods/757783e8-e867-426d-94e0-952fd124f60c/volumes" Nov 22 07:50:19 crc kubenswrapper[4929]: I1122 07:50:19.963609 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8043acad-43a7-4549-b279-b9545f71283a" path="/var/lib/kubelet/pods/8043acad-43a7-4549-b279-b9545f71283a/volumes" Nov 22 07:50:20 crc kubenswrapper[4929]: I1122 07:50:20.055495 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b4b632b-b258-4862-aeda-4c06647d490f" path="/var/lib/kubelet/pods/9b4b632b-b258-4862-aeda-4c06647d490f/volumes" Nov 22 07:50:20 crc kubenswrapper[4929]: I1122 07:50:20.056776 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-create-xhj8d"] Nov 22 07:50:20 crc kubenswrapper[4929]: I1122 07:50:20.068638 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-create-xhj8d"] Nov 22 07:50:20 crc kubenswrapper[4929]: I1122 07:50:20.079021 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/watcher-0fbc-account-create-xmjgc"] Nov 22 07:50:20 crc kubenswrapper[4929]: I1122 07:50:20.087103 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-0fbc-account-create-xmjgc"] Nov 22 07:50:21 crc kubenswrapper[4929]: I1122 07:50:21.041349 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-xh9z8"] Nov 22 07:50:21 crc kubenswrapper[4929]: I1122 07:50:21.052734 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-xh9z8"] Nov 22 07:50:21 crc kubenswrapper[4929]: I1122 07:50:21.770019 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-75bj8" Nov 22 07:50:21 crc kubenswrapper[4929]: I1122 07:50:21.880428 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bcb575a-cde1-458f-85d0-17ab7cf60522-catalog-content\") pod \"9bcb575a-cde1-458f-85d0-17ab7cf60522\" (UID: \"9bcb575a-cde1-458f-85d0-17ab7cf60522\") " Nov 22 07:50:21 crc kubenswrapper[4929]: I1122 07:50:21.880515 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bcb575a-cde1-458f-85d0-17ab7cf60522-utilities\") pod \"9bcb575a-cde1-458f-85d0-17ab7cf60522\" (UID: \"9bcb575a-cde1-458f-85d0-17ab7cf60522\") " Nov 22 07:50:21 crc kubenswrapper[4929]: I1122 07:50:21.880669 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rngqq\" (UniqueName: \"kubernetes.io/projected/9bcb575a-cde1-458f-85d0-17ab7cf60522-kube-api-access-rngqq\") pod \"9bcb575a-cde1-458f-85d0-17ab7cf60522\" (UID: \"9bcb575a-cde1-458f-85d0-17ab7cf60522\") " Nov 22 07:50:21 crc kubenswrapper[4929]: I1122 07:50:21.881146 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9bcb575a-cde1-458f-85d0-17ab7cf60522-utilities" (OuterVolumeSpecName: "utilities") pod "9bcb575a-cde1-458f-85d0-17ab7cf60522" (UID: "9bcb575a-cde1-458f-85d0-17ab7cf60522"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:50:21 crc kubenswrapper[4929]: I1122 07:50:21.889268 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9bcb575a-cde1-458f-85d0-17ab7cf60522-kube-api-access-rngqq" (OuterVolumeSpecName: "kube-api-access-rngqq") pod "9bcb575a-cde1-458f-85d0-17ab7cf60522" (UID: "9bcb575a-cde1-458f-85d0-17ab7cf60522"). InnerVolumeSpecName "kube-api-access-rngqq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:50:21 crc kubenswrapper[4929]: I1122 07:50:21.960929 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d10c475-c29a-43b7-8607-71679b6109eb" path="/var/lib/kubelet/pods/2d10c475-c29a-43b7-8607-71679b6109eb/volumes" Nov 22 07:50:21 crc kubenswrapper[4929]: I1122 07:50:21.961718 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f" path="/var/lib/kubelet/pods/65cd3e57-02b6-46a6-a3bf-2fad9cf13a2f/volumes" Nov 22 07:50:21 crc kubenswrapper[4929]: I1122 07:50:21.962504 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="989d7793-3fc8-45d9-83dc-30f02a1e7876" path="/var/lib/kubelet/pods/989d7793-3fc8-45d9-83dc-30f02a1e7876/volumes" Nov 22 07:50:21 crc kubenswrapper[4929]: I1122 07:50:21.983197 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bcb575a-cde1-458f-85d0-17ab7cf60522-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 07:50:21 crc kubenswrapper[4929]: I1122 07:50:21.983248 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rngqq\" (UniqueName: \"kubernetes.io/projected/9bcb575a-cde1-458f-85d0-17ab7cf60522-kube-api-access-rngqq\") on node \"crc\" DevicePath \"\"" Nov 22 07:50:22 crc kubenswrapper[4929]: I1122 07:50:22.027002 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-618d-account-create-prpvg"] Nov 22 07:50:22 crc kubenswrapper[4929]: I1122 07:50:22.034338 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-618d-account-create-prpvg"] Nov 22 07:50:22 crc kubenswrapper[4929]: I1122 07:50:22.551904 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-sq949" podUID="680b78e9-161f-47a3-8fe8-39fc6602b0f7" containerName="registry-server" probeResult="failure" output=< Nov 22 07:50:22 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 07:50:22 crc kubenswrapper[4929]: > Nov 22 07:50:22 crc kubenswrapper[4929]: I1122 07:50:22.683210 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75bj8" event={"ID":"9bcb575a-cde1-458f-85d0-17ab7cf60522","Type":"ContainerDied","Data":"934eb2ab2cbd2c1be8a325aa9c4efb31cd7ae477155e7793ee42d2f6eac99052"} Nov 22 07:50:22 crc kubenswrapper[4929]: I1122 07:50:22.683406 4929 scope.go:117] "RemoveContainer" containerID="3ad080c84816b41fcbfa80215e45b827e45fb0c1f70671e98537bf97abda8226" Nov 22 07:50:22 crc kubenswrapper[4929]: I1122 07:50:22.683418 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-75bj8" Nov 22 07:50:23 crc kubenswrapper[4929]: I1122 07:50:23.174950 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7wb2m" podUID="dd35db38-a8f4-40d9-805b-af72be930c0a" containerName="registry-server" probeResult="failure" output=< Nov 22 07:50:23 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 07:50:23 crc kubenswrapper[4929]: > Nov 22 07:50:23 crc kubenswrapper[4929]: I1122 07:50:23.975626 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57adf361-307d-4f8c-817c-6dcb54a40b4c" path="/var/lib/kubelet/pods/57adf361-307d-4f8c-817c-6dcb54a40b4c/volumes" Nov 22 07:50:24 crc kubenswrapper[4929]: I1122 07:50:24.434702 4929 scope.go:117] "RemoveContainer" containerID="32b95ffdc9ac1640e481d0df0e7dabfef2c06b4065c1b1b3ba610bc8b2daab8f" Nov 22 07:50:24 crc kubenswrapper[4929]: I1122 07:50:24.458650 4929 scope.go:117] "RemoveContainer" containerID="fdfc31f4e597174b7099d2e30a333ca3310673299b08595790d44c4cf139001c" Nov 22 07:50:25 crc kubenswrapper[4929]: I1122 07:50:25.014670 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/cinder-scheduler-0" podUID="143bdc31-dc3f-4c3f-81e2-9e4314ba960d" containerName="cinder-scheduler" probeResult="failure" output="Get \"http://10.217.0.203:8080/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 07:50:25 crc kubenswrapper[4929]: I1122 07:50:25.031909 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-wp4fv"] Nov 22 07:50:25 crc kubenswrapper[4929]: I1122 07:50:25.042846 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-x5n9p"] Nov 22 07:50:25 crc kubenswrapper[4929]: I1122 07:50:25.054456 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-x5n9p"] Nov 22 07:50:25 crc kubenswrapper[4929]: I1122 07:50:25.063588 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-wp4fv"] Nov 22 07:50:25 crc kubenswrapper[4929]: I1122 07:50:25.959998 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f9656a5-c30f-414d-a694-1a34468a3040" path="/var/lib/kubelet/pods/4f9656a5-c30f-414d-a694-1a34468a3040/volumes" Nov 22 07:50:25 crc kubenswrapper[4929]: I1122 07:50:25.961238 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3378093-f835-4a48-9c94-0196360494db" path="/var/lib/kubelet/pods/c3378093-f835-4a48-9c94-0196360494db/volumes" Nov 22 07:50:26 crc kubenswrapper[4929]: I1122 07:50:26.031632 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-245c-account-create-vgxsx"] Nov 22 07:50:26 crc kubenswrapper[4929]: I1122 07:50:26.042792 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-245c-account-create-vgxsx"] Nov 22 07:50:26 crc kubenswrapper[4929]: I1122 07:50:26.723061 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 07:50:26 crc kubenswrapper[4929]: I1122 07:50:26.790986 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 22 07:50:27 crc kubenswrapper[4929]: I1122 07:50:27.738454 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qmh7p" 
event={"ID":"cb865101-8d1a-4930-b37d-73df5d40e618","Type":"ContainerStarted","Data":"4488c5cf0133d5b8f454ec87c18271be7b9d47167440ea19ac212f0a6ac76dae"} Nov 22 07:50:27 crc kubenswrapper[4929]: I1122 07:50:27.960570 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9986f3d9-5c31-45c9-be65-411d993ac709" path="/var/lib/kubelet/pods/9986f3d9-5c31-45c9-be65-411d993ac709/volumes" Nov 22 07:50:28 crc kubenswrapper[4929]: I1122 07:50:28.765855 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qmh7p" podStartSLOduration=7.729421386 podStartE2EDuration="54.765839901s" podCreationTimestamp="2025-11-22 07:49:34 +0000 UTC" firstStartedPulling="2025-11-22 07:49:37.398431588 +0000 UTC m=+2314.507885601" lastFinishedPulling="2025-11-22 07:50:24.434850103 +0000 UTC m=+2361.544304116" observedRunningTime="2025-11-22 07:50:28.764288242 +0000 UTC m=+2365.873742265" watchObservedRunningTime="2025-11-22 07:50:28.765839901 +0000 UTC m=+2365.875293914" Nov 22 07:50:31 crc kubenswrapper[4929]: I1122 07:50:31.028723 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-01c3-account-create-s4ms5"] Nov 22 07:50:31 crc kubenswrapper[4929]: I1122 07:50:31.037386 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-9x644"] Nov 22 07:50:31 crc kubenswrapper[4929]: I1122 07:50:31.069032 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-cbf9-account-create-6dk52"] Nov 22 07:50:31 crc kubenswrapper[4929]: I1122 07:50:31.073026 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-01c3-account-create-s4ms5"] Nov 22 07:50:31 crc kubenswrapper[4929]: I1122 07:50:31.083865 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-9x644"] Nov 22 07:50:31 crc kubenswrapper[4929]: I1122 07:50:31.094029 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-cbf9-account-create-6dk52"] Nov 22 07:50:31 crc kubenswrapper[4929]: I1122 07:50:31.961448 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27746c3a-116c-4c6d-9697-9f44777a65be" path="/var/lib/kubelet/pods/27746c3a-116c-4c6d-9697-9f44777a65be/volumes" Nov 22 07:50:31 crc kubenswrapper[4929]: I1122 07:50:31.962886 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a74e4b0-12f9-4d8f-a2b1-d8145c286d89" path="/var/lib/kubelet/pods/5a74e4b0-12f9-4d8f-a2b1-d8145c286d89/volumes" Nov 22 07:50:31 crc kubenswrapper[4929]: I1122 07:50:31.963491 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed5c1073-5bff-4d40-82fe-014200e5d8ca" path="/var/lib/kubelet/pods/ed5c1073-5bff-4d40-82fe-014200e5d8ca/volumes" Nov 22 07:50:32 crc kubenswrapper[4929]: I1122 07:50:32.513496 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-sq949" podUID="680b78e9-161f-47a3-8fe8-39fc6602b0f7" containerName="registry-server" probeResult="failure" output=< Nov 22 07:50:32 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 07:50:32 crc kubenswrapper[4929]: > Nov 22 07:50:33 crc kubenswrapper[4929]: I1122 07:50:33.175398 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7wb2m" podUID="dd35db38-a8f4-40d9-805b-af72be930c0a" containerName="registry-server" probeResult="failure" output=< Nov 22 07:50:33 crc kubenswrapper[4929]: timeout: failed to connect service 
":50051" within 1s Nov 22 07:50:33 crc kubenswrapper[4929]: > Nov 22 07:50:34 crc kubenswrapper[4929]: I1122 07:50:34.429356 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qmh7p" Nov 22 07:50:34 crc kubenswrapper[4929]: I1122 07:50:34.429764 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qmh7p" Nov 22 07:50:35 crc kubenswrapper[4929]: I1122 07:50:35.496116 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-qmh7p" podUID="cb865101-8d1a-4930-b37d-73df5d40e618" containerName="registry-server" probeResult="failure" output=< Nov 22 07:50:35 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 07:50:35 crc kubenswrapper[4929]: > Nov 22 07:50:35 crc kubenswrapper[4929]: I1122 07:50:35.813666 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9bcb575a-cde1-458f-85d0-17ab7cf60522-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9bcb575a-cde1-458f-85d0-17ab7cf60522" (UID: "9bcb575a-cde1-458f-85d0-17ab7cf60522"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:50:35 crc kubenswrapper[4929]: I1122 07:50:35.868922 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bcb575a-cde1-458f-85d0-17ab7cf60522-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 07:50:35 crc kubenswrapper[4929]: I1122 07:50:35.936240 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-75bj8"] Nov 22 07:50:35 crc kubenswrapper[4929]: I1122 07:50:35.945044 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-75bj8"] Nov 22 07:50:35 crc kubenswrapper[4929]: I1122 07:50:35.958110 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9bcb575a-cde1-458f-85d0-17ab7cf60522" path="/var/lib/kubelet/pods/9bcb575a-cde1-458f-85d0-17ab7cf60522/volumes" Nov 22 07:50:42 crc kubenswrapper[4929]: I1122 07:50:42.172956 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7wb2m" Nov 22 07:50:42 crc kubenswrapper[4929]: I1122 07:50:42.223168 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7wb2m" Nov 22 07:50:42 crc kubenswrapper[4929]: I1122 07:50:42.407159 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7wb2m"] Nov 22 07:50:42 crc kubenswrapper[4929]: I1122 07:50:42.514350 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-sq949" podUID="680b78e9-161f-47a3-8fe8-39fc6602b0f7" containerName="registry-server" probeResult="failure" output=< Nov 22 07:50:42 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 07:50:42 crc kubenswrapper[4929]: > Nov 22 07:50:43 crc kubenswrapper[4929]: I1122 07:50:43.904641 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7wb2m" podUID="dd35db38-a8f4-40d9-805b-af72be930c0a" containerName="registry-server" containerID="cri-o://18a3c5fa6b04f10fbe1651823d70b6d08629a4b964579679211fb496139d257d" gracePeriod=2 Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 
07:50:44.371130 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7wb2m" Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.474790 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qmh7p" Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.538024 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qmh7p" Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.539763 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqkhp\" (UniqueName: \"kubernetes.io/projected/dd35db38-a8f4-40d9-805b-af72be930c0a-kube-api-access-fqkhp\") pod \"dd35db38-a8f4-40d9-805b-af72be930c0a\" (UID: \"dd35db38-a8f4-40d9-805b-af72be930c0a\") " Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.539840 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd35db38-a8f4-40d9-805b-af72be930c0a-utilities\") pod \"dd35db38-a8f4-40d9-805b-af72be930c0a\" (UID: \"dd35db38-a8f4-40d9-805b-af72be930c0a\") " Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.539889 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd35db38-a8f4-40d9-805b-af72be930c0a-catalog-content\") pod \"dd35db38-a8f4-40d9-805b-af72be930c0a\" (UID: \"dd35db38-a8f4-40d9-805b-af72be930c0a\") " Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.540693 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd35db38-a8f4-40d9-805b-af72be930c0a-utilities" (OuterVolumeSpecName: "utilities") pod "dd35db38-a8f4-40d9-805b-af72be930c0a" (UID: "dd35db38-a8f4-40d9-805b-af72be930c0a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.556354 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd35db38-a8f4-40d9-805b-af72be930c0a-kube-api-access-fqkhp" (OuterVolumeSpecName: "kube-api-access-fqkhp") pod "dd35db38-a8f4-40d9-805b-af72be930c0a" (UID: "dd35db38-a8f4-40d9-805b-af72be930c0a"). InnerVolumeSpecName "kube-api-access-fqkhp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.629523 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd35db38-a8f4-40d9-805b-af72be930c0a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dd35db38-a8f4-40d9-805b-af72be930c0a" (UID: "dd35db38-a8f4-40d9-805b-af72be930c0a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.642008 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dd35db38-a8f4-40d9-805b-af72be930c0a-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.642048 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dd35db38-a8f4-40d9-805b-af72be930c0a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.642064 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqkhp\" (UniqueName: \"kubernetes.io/projected/dd35db38-a8f4-40d9-805b-af72be930c0a-kube-api-access-fqkhp\") on node \"crc\" DevicePath \"\"" Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.915137 4929 generic.go:334] "Generic (PLEG): container finished" podID="dd35db38-a8f4-40d9-805b-af72be930c0a" containerID="18a3c5fa6b04f10fbe1651823d70b6d08629a4b964579679211fb496139d257d" exitCode=0 Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.915186 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7wb2m" event={"ID":"dd35db38-a8f4-40d9-805b-af72be930c0a","Type":"ContainerDied","Data":"18a3c5fa6b04f10fbe1651823d70b6d08629a4b964579679211fb496139d257d"} Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.915585 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7wb2m" event={"ID":"dd35db38-a8f4-40d9-805b-af72be930c0a","Type":"ContainerDied","Data":"8177c37a94266df4e7da309f1f3f1f2f4ed02de1ddf4bc1efd1b29cab7b752d5"} Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.915273 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7wb2m" Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.915627 4929 scope.go:117] "RemoveContainer" containerID="18a3c5fa6b04f10fbe1651823d70b6d08629a4b964579679211fb496139d257d" Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.939959 4929 scope.go:117] "RemoveContainer" containerID="63f37f4b8a7e647d4203715b54de799b69739ba20f94f11798f315bfbd2e5aee" Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.961003 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7wb2m"] Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.976050 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7wb2m"] Nov 22 07:50:44 crc kubenswrapper[4929]: I1122 07:50:44.981913 4929 scope.go:117] "RemoveContainer" containerID="2e83d6dec2408aa04eb384d66eeaf0479989571b0ae49251ee1e11e599b6d690" Nov 22 07:50:45 crc kubenswrapper[4929]: I1122 07:50:45.030827 4929 scope.go:117] "RemoveContainer" containerID="18a3c5fa6b04f10fbe1651823d70b6d08629a4b964579679211fb496139d257d" Nov 22 07:50:45 crc kubenswrapper[4929]: E1122 07:50:45.031254 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18a3c5fa6b04f10fbe1651823d70b6d08629a4b964579679211fb496139d257d\": container with ID starting with 18a3c5fa6b04f10fbe1651823d70b6d08629a4b964579679211fb496139d257d not found: ID does not exist" containerID="18a3c5fa6b04f10fbe1651823d70b6d08629a4b964579679211fb496139d257d" Nov 22 07:50:45 crc kubenswrapper[4929]: I1122 07:50:45.031302 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18a3c5fa6b04f10fbe1651823d70b6d08629a4b964579679211fb496139d257d"} err="failed to get container status \"18a3c5fa6b04f10fbe1651823d70b6d08629a4b964579679211fb496139d257d\": rpc error: code = NotFound desc = could not find container \"18a3c5fa6b04f10fbe1651823d70b6d08629a4b964579679211fb496139d257d\": container with ID starting with 18a3c5fa6b04f10fbe1651823d70b6d08629a4b964579679211fb496139d257d not found: ID does not exist" Nov 22 07:50:45 crc kubenswrapper[4929]: I1122 07:50:45.031329 4929 scope.go:117] "RemoveContainer" containerID="63f37f4b8a7e647d4203715b54de799b69739ba20f94f11798f315bfbd2e5aee" Nov 22 07:50:45 crc kubenswrapper[4929]: E1122 07:50:45.031888 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63f37f4b8a7e647d4203715b54de799b69739ba20f94f11798f315bfbd2e5aee\": container with ID starting with 63f37f4b8a7e647d4203715b54de799b69739ba20f94f11798f315bfbd2e5aee not found: ID does not exist" containerID="63f37f4b8a7e647d4203715b54de799b69739ba20f94f11798f315bfbd2e5aee" Nov 22 07:50:45 crc kubenswrapper[4929]: I1122 07:50:45.031926 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63f37f4b8a7e647d4203715b54de799b69739ba20f94f11798f315bfbd2e5aee"} err="failed to get container status \"63f37f4b8a7e647d4203715b54de799b69739ba20f94f11798f315bfbd2e5aee\": rpc error: code = NotFound desc = could not find container \"63f37f4b8a7e647d4203715b54de799b69739ba20f94f11798f315bfbd2e5aee\": container with ID starting with 63f37f4b8a7e647d4203715b54de799b69739ba20f94f11798f315bfbd2e5aee not found: ID does not exist" Nov 22 07:50:45 crc kubenswrapper[4929]: I1122 07:50:45.031954 4929 scope.go:117] "RemoveContainer" 
containerID="2e83d6dec2408aa04eb384d66eeaf0479989571b0ae49251ee1e11e599b6d690" Nov 22 07:50:45 crc kubenswrapper[4929]: E1122 07:50:45.032256 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e83d6dec2408aa04eb384d66eeaf0479989571b0ae49251ee1e11e599b6d690\": container with ID starting with 2e83d6dec2408aa04eb384d66eeaf0479989571b0ae49251ee1e11e599b6d690 not found: ID does not exist" containerID="2e83d6dec2408aa04eb384d66eeaf0479989571b0ae49251ee1e11e599b6d690" Nov 22 07:50:45 crc kubenswrapper[4929]: I1122 07:50:45.032280 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e83d6dec2408aa04eb384d66eeaf0479989571b0ae49251ee1e11e599b6d690"} err="failed to get container status \"2e83d6dec2408aa04eb384d66eeaf0479989571b0ae49251ee1e11e599b6d690\": rpc error: code = NotFound desc = could not find container \"2e83d6dec2408aa04eb384d66eeaf0479989571b0ae49251ee1e11e599b6d690\": container with ID starting with 2e83d6dec2408aa04eb384d66eeaf0479989571b0ae49251ee1e11e599b6d690 not found: ID does not exist" Nov 22 07:50:45 crc kubenswrapper[4929]: I1122 07:50:45.617061 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qmh7p"] Nov 22 07:50:45 crc kubenswrapper[4929]: I1122 07:50:45.929388 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qmh7p" podUID="cb865101-8d1a-4930-b37d-73df5d40e618" containerName="registry-server" containerID="cri-o://4488c5cf0133d5b8f454ec87c18271be7b9d47167440ea19ac212f0a6ac76dae" gracePeriod=2 Nov 22 07:50:45 crc kubenswrapper[4929]: I1122 07:50:45.965829 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd35db38-a8f4-40d9-805b-af72be930c0a" path="/var/lib/kubelet/pods/dd35db38-a8f4-40d9-805b-af72be930c0a/volumes" Nov 22 07:50:46 crc kubenswrapper[4929]: I1122 07:50:46.432268 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qmh7p" Nov 22 07:50:46 crc kubenswrapper[4929]: I1122 07:50:46.583853 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb865101-8d1a-4930-b37d-73df5d40e618-utilities\") pod \"cb865101-8d1a-4930-b37d-73df5d40e618\" (UID: \"cb865101-8d1a-4930-b37d-73df5d40e618\") " Nov 22 07:50:46 crc kubenswrapper[4929]: I1122 07:50:46.584452 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb865101-8d1a-4930-b37d-73df5d40e618-utilities" (OuterVolumeSpecName: "utilities") pod "cb865101-8d1a-4930-b37d-73df5d40e618" (UID: "cb865101-8d1a-4930-b37d-73df5d40e618"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:50:46 crc kubenswrapper[4929]: I1122 07:50:46.584588 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5tmfw\" (UniqueName: \"kubernetes.io/projected/cb865101-8d1a-4930-b37d-73df5d40e618-kube-api-access-5tmfw\") pod \"cb865101-8d1a-4930-b37d-73df5d40e618\" (UID: \"cb865101-8d1a-4930-b37d-73df5d40e618\") " Nov 22 07:50:46 crc kubenswrapper[4929]: I1122 07:50:46.585336 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb865101-8d1a-4930-b37d-73df5d40e618-catalog-content\") pod \"cb865101-8d1a-4930-b37d-73df5d40e618\" (UID: \"cb865101-8d1a-4930-b37d-73df5d40e618\") " Nov 22 07:50:46 crc kubenswrapper[4929]: I1122 07:50:46.585750 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb865101-8d1a-4930-b37d-73df5d40e618-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 07:50:46 crc kubenswrapper[4929]: I1122 07:50:46.589075 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb865101-8d1a-4930-b37d-73df5d40e618-kube-api-access-5tmfw" (OuterVolumeSpecName: "kube-api-access-5tmfw") pod "cb865101-8d1a-4930-b37d-73df5d40e618" (UID: "cb865101-8d1a-4930-b37d-73df5d40e618"). InnerVolumeSpecName "kube-api-access-5tmfw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:50:46 crc kubenswrapper[4929]: I1122 07:50:46.647306 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb865101-8d1a-4930-b37d-73df5d40e618-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cb865101-8d1a-4930-b37d-73df5d40e618" (UID: "cb865101-8d1a-4930-b37d-73df5d40e618"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:50:46 crc kubenswrapper[4929]: I1122 07:50:46.687460 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb865101-8d1a-4930-b37d-73df5d40e618-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 07:50:46 crc kubenswrapper[4929]: I1122 07:50:46.687509 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5tmfw\" (UniqueName: \"kubernetes.io/projected/cb865101-8d1a-4930-b37d-73df5d40e618-kube-api-access-5tmfw\") on node \"crc\" DevicePath \"\"" Nov 22 07:50:46 crc kubenswrapper[4929]: I1122 07:50:46.944478 4929 generic.go:334] "Generic (PLEG): container finished" podID="cb865101-8d1a-4930-b37d-73df5d40e618" containerID="4488c5cf0133d5b8f454ec87c18271be7b9d47167440ea19ac212f0a6ac76dae" exitCode=0 Nov 22 07:50:46 crc kubenswrapper[4929]: I1122 07:50:46.944678 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qmh7p" event={"ID":"cb865101-8d1a-4930-b37d-73df5d40e618","Type":"ContainerDied","Data":"4488c5cf0133d5b8f454ec87c18271be7b9d47167440ea19ac212f0a6ac76dae"} Nov 22 07:50:46 crc kubenswrapper[4929]: I1122 07:50:46.944708 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qmh7p" Nov 22 07:50:46 crc kubenswrapper[4929]: I1122 07:50:46.944882 4929 scope.go:117] "RemoveContainer" containerID="4488c5cf0133d5b8f454ec87c18271be7b9d47167440ea19ac212f0a6ac76dae" Nov 22 07:50:46 crc kubenswrapper[4929]: I1122 07:50:46.944861 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qmh7p" event={"ID":"cb865101-8d1a-4930-b37d-73df5d40e618","Type":"ContainerDied","Data":"da6f8d4b230cc1c076cb55b5ef67f13521dd3c4cfa7fc60cb98e4ba183afd94b"} Nov 22 07:50:46 crc kubenswrapper[4929]: I1122 07:50:46.986857 4929 scope.go:117] "RemoveContainer" containerID="241c1378d112747f8ab81be2c869fb89f7202365181fd51c14d0154f376ac649" Nov 22 07:50:46 crc kubenswrapper[4929]: I1122 07:50:46.988363 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qmh7p"] Nov 22 07:50:47 crc kubenswrapper[4929]: I1122 07:50:47.001573 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qmh7p"] Nov 22 07:50:47 crc kubenswrapper[4929]: I1122 07:50:47.010160 4929 scope.go:117] "RemoveContainer" containerID="b216c678f23ca3ab2f3c73e86d2ab6c9a9d27b59e079d95d72bf41a0a63175cc" Nov 22 07:50:47 crc kubenswrapper[4929]: I1122 07:50:47.049424 4929 scope.go:117] "RemoveContainer" containerID="4488c5cf0133d5b8f454ec87c18271be7b9d47167440ea19ac212f0a6ac76dae" Nov 22 07:50:47 crc kubenswrapper[4929]: E1122 07:50:47.049968 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4488c5cf0133d5b8f454ec87c18271be7b9d47167440ea19ac212f0a6ac76dae\": container with ID starting with 4488c5cf0133d5b8f454ec87c18271be7b9d47167440ea19ac212f0a6ac76dae not found: ID does not exist" containerID="4488c5cf0133d5b8f454ec87c18271be7b9d47167440ea19ac212f0a6ac76dae" Nov 22 07:50:47 crc kubenswrapper[4929]: I1122 07:50:47.050006 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4488c5cf0133d5b8f454ec87c18271be7b9d47167440ea19ac212f0a6ac76dae"} err="failed to get container status \"4488c5cf0133d5b8f454ec87c18271be7b9d47167440ea19ac212f0a6ac76dae\": rpc error: code = NotFound desc = could not find container \"4488c5cf0133d5b8f454ec87c18271be7b9d47167440ea19ac212f0a6ac76dae\": container with ID starting with 4488c5cf0133d5b8f454ec87c18271be7b9d47167440ea19ac212f0a6ac76dae not found: ID does not exist" Nov 22 07:50:47 crc kubenswrapper[4929]: I1122 07:50:47.050032 4929 scope.go:117] "RemoveContainer" containerID="241c1378d112747f8ab81be2c869fb89f7202365181fd51c14d0154f376ac649" Nov 22 07:50:47 crc kubenswrapper[4929]: E1122 07:50:47.050521 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"241c1378d112747f8ab81be2c869fb89f7202365181fd51c14d0154f376ac649\": container with ID starting with 241c1378d112747f8ab81be2c869fb89f7202365181fd51c14d0154f376ac649 not found: ID does not exist" containerID="241c1378d112747f8ab81be2c869fb89f7202365181fd51c14d0154f376ac649" Nov 22 07:50:47 crc kubenswrapper[4929]: I1122 07:50:47.050555 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"241c1378d112747f8ab81be2c869fb89f7202365181fd51c14d0154f376ac649"} err="failed to get container status \"241c1378d112747f8ab81be2c869fb89f7202365181fd51c14d0154f376ac649\": rpc error: code = NotFound desc = could not find 
container \"241c1378d112747f8ab81be2c869fb89f7202365181fd51c14d0154f376ac649\": container with ID starting with 241c1378d112747f8ab81be2c869fb89f7202365181fd51c14d0154f376ac649 not found: ID does not exist" Nov 22 07:50:47 crc kubenswrapper[4929]: I1122 07:50:47.050580 4929 scope.go:117] "RemoveContainer" containerID="b216c678f23ca3ab2f3c73e86d2ab6c9a9d27b59e079d95d72bf41a0a63175cc" Nov 22 07:50:47 crc kubenswrapper[4929]: E1122 07:50:47.050937 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b216c678f23ca3ab2f3c73e86d2ab6c9a9d27b59e079d95d72bf41a0a63175cc\": container with ID starting with b216c678f23ca3ab2f3c73e86d2ab6c9a9d27b59e079d95d72bf41a0a63175cc not found: ID does not exist" containerID="b216c678f23ca3ab2f3c73e86d2ab6c9a9d27b59e079d95d72bf41a0a63175cc" Nov 22 07:50:47 crc kubenswrapper[4929]: I1122 07:50:47.050966 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b216c678f23ca3ab2f3c73e86d2ab6c9a9d27b59e079d95d72bf41a0a63175cc"} err="failed to get container status \"b216c678f23ca3ab2f3c73e86d2ab6c9a9d27b59e079d95d72bf41a0a63175cc\": rpc error: code = NotFound desc = could not find container \"b216c678f23ca3ab2f3c73e86d2ab6c9a9d27b59e079d95d72bf41a0a63175cc\": container with ID starting with b216c678f23ca3ab2f3c73e86d2ab6c9a9d27b59e079d95d72bf41a0a63175cc not found: ID does not exist" Nov 22 07:50:47 crc kubenswrapper[4929]: I1122 07:50:47.970274 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb865101-8d1a-4930-b37d-73df5d40e618" path="/var/lib/kubelet/pods/cb865101-8d1a-4930-b37d-73df5d40e618/volumes" Nov 22 07:50:48 crc kubenswrapper[4929]: I1122 07:50:48.594651 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:50:48 crc kubenswrapper[4929]: I1122 07:50:48.595034 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:50:52 crc kubenswrapper[4929]: I1122 07:50:52.514272 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-sq949" podUID="680b78e9-161f-47a3-8fe8-39fc6602b0f7" containerName="registry-server" probeResult="failure" output=< Nov 22 07:50:52 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 07:50:52 crc kubenswrapper[4929]: > Nov 22 07:51:01 crc kubenswrapper[4929]: I1122 07:51:01.518705 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-sq949" Nov 22 07:51:01 crc kubenswrapper[4929]: I1122 07:51:01.570444 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-sq949" Nov 22 07:51:02 crc kubenswrapper[4929]: I1122 07:51:02.944737 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sq949"] Nov 22 07:51:03 crc kubenswrapper[4929]: I1122 07:51:03.113871 4929 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/redhat-marketplace-sq949" podUID="680b78e9-161f-47a3-8fe8-39fc6602b0f7" containerName="registry-server" containerID="cri-o://05d89016841dbe3c5ab5031215bead7cec49f477e73ca138789611724d8f9edd" gracePeriod=2 Nov 22 07:51:04 crc kubenswrapper[4929]: I1122 07:51:04.125816 4929 generic.go:334] "Generic (PLEG): container finished" podID="680b78e9-161f-47a3-8fe8-39fc6602b0f7" containerID="05d89016841dbe3c5ab5031215bead7cec49f477e73ca138789611724d8f9edd" exitCode=0 Nov 22 07:51:04 crc kubenswrapper[4929]: I1122 07:51:04.125862 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sq949" event={"ID":"680b78e9-161f-47a3-8fe8-39fc6602b0f7","Type":"ContainerDied","Data":"05d89016841dbe3c5ab5031215bead7cec49f477e73ca138789611724d8f9edd"} Nov 22 07:51:04 crc kubenswrapper[4929]: I1122 07:51:04.125891 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sq949" event={"ID":"680b78e9-161f-47a3-8fe8-39fc6602b0f7","Type":"ContainerDied","Data":"e402a959340a1f264998ec3dbf40cbc5789069b54ba674947457d87b114170a7"} Nov 22 07:51:04 crc kubenswrapper[4929]: I1122 07:51:04.125906 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e402a959340a1f264998ec3dbf40cbc5789069b54ba674947457d87b114170a7" Nov 22 07:51:04 crc kubenswrapper[4929]: I1122 07:51:04.221301 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sq949" Nov 22 07:51:04 crc kubenswrapper[4929]: I1122 07:51:04.245356 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ppbvm\" (UniqueName: \"kubernetes.io/projected/680b78e9-161f-47a3-8fe8-39fc6602b0f7-kube-api-access-ppbvm\") pod \"680b78e9-161f-47a3-8fe8-39fc6602b0f7\" (UID: \"680b78e9-161f-47a3-8fe8-39fc6602b0f7\") " Nov 22 07:51:04 crc kubenswrapper[4929]: I1122 07:51:04.245760 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/680b78e9-161f-47a3-8fe8-39fc6602b0f7-utilities\") pod \"680b78e9-161f-47a3-8fe8-39fc6602b0f7\" (UID: \"680b78e9-161f-47a3-8fe8-39fc6602b0f7\") " Nov 22 07:51:04 crc kubenswrapper[4929]: I1122 07:51:04.245828 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/680b78e9-161f-47a3-8fe8-39fc6602b0f7-catalog-content\") pod \"680b78e9-161f-47a3-8fe8-39fc6602b0f7\" (UID: \"680b78e9-161f-47a3-8fe8-39fc6602b0f7\") " Nov 22 07:51:04 crc kubenswrapper[4929]: I1122 07:51:04.247143 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/680b78e9-161f-47a3-8fe8-39fc6602b0f7-utilities" (OuterVolumeSpecName: "utilities") pod "680b78e9-161f-47a3-8fe8-39fc6602b0f7" (UID: "680b78e9-161f-47a3-8fe8-39fc6602b0f7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:51:04 crc kubenswrapper[4929]: I1122 07:51:04.269139 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/680b78e9-161f-47a3-8fe8-39fc6602b0f7-kube-api-access-ppbvm" (OuterVolumeSpecName: "kube-api-access-ppbvm") pod "680b78e9-161f-47a3-8fe8-39fc6602b0f7" (UID: "680b78e9-161f-47a3-8fe8-39fc6602b0f7"). InnerVolumeSpecName "kube-api-access-ppbvm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:51:04 crc kubenswrapper[4929]: I1122 07:51:04.283324 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/680b78e9-161f-47a3-8fe8-39fc6602b0f7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "680b78e9-161f-47a3-8fe8-39fc6602b0f7" (UID: "680b78e9-161f-47a3-8fe8-39fc6602b0f7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:51:04 crc kubenswrapper[4929]: I1122 07:51:04.347906 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/680b78e9-161f-47a3-8fe8-39fc6602b0f7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 07:51:04 crc kubenswrapper[4929]: I1122 07:51:04.347940 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ppbvm\" (UniqueName: \"kubernetes.io/projected/680b78e9-161f-47a3-8fe8-39fc6602b0f7-kube-api-access-ppbvm\") on node \"crc\" DevicePath \"\"" Nov 22 07:51:04 crc kubenswrapper[4929]: I1122 07:51:04.347950 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/680b78e9-161f-47a3-8fe8-39fc6602b0f7-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 07:51:05 crc kubenswrapper[4929]: I1122 07:51:05.135354 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sq949" Nov 22 07:51:05 crc kubenswrapper[4929]: I1122 07:51:05.175436 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sq949"] Nov 22 07:51:05 crc kubenswrapper[4929]: I1122 07:51:05.185889 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-sq949"] Nov 22 07:51:05 crc kubenswrapper[4929]: I1122 07:51:05.962915 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="680b78e9-161f-47a3-8fe8-39fc6602b0f7" path="/var/lib/kubelet/pods/680b78e9-161f-47a3-8fe8-39fc6602b0f7/volumes" Nov 22 07:51:09 crc kubenswrapper[4929]: I1122 07:51:09.644518 4929 scope.go:117] "RemoveContainer" containerID="30cad07f2dd9f9365feecc12d701858460cfd0c45f0e781fdfc51deecb822163" Nov 22 07:51:09 crc kubenswrapper[4929]: I1122 07:51:09.674664 4929 scope.go:117] "RemoveContainer" containerID="656d532293189756f2238e4e8348282eadcb49d1bf4f0e81b242df8a0f93f12f" Nov 22 07:51:09 crc kubenswrapper[4929]: I1122 07:51:09.723091 4929 scope.go:117] "RemoveContainer" containerID="8d8e41a5e003e60b99e57a4fe2d097fe40985da0cc3ce4066a0b88a86e0ff1ea" Nov 22 07:51:09 crc kubenswrapper[4929]: I1122 07:51:09.793868 4929 scope.go:117] "RemoveContainer" containerID="250f1418d2c1d6ef793feadac1b10f27be8417e419bf9b938cf1829f18ad9a11" Nov 22 07:51:09 crc kubenswrapper[4929]: I1122 07:51:09.841815 4929 scope.go:117] "RemoveContainer" containerID="fafbfdf30281c09cd4e03b4344099c0de6b1547f4c7fb118c23684d14d90e92f" Nov 22 07:51:09 crc kubenswrapper[4929]: I1122 07:51:09.895348 4929 scope.go:117] "RemoveContainer" containerID="6d780ed71d07b9618e7e34ad3770d037b80d4c84018063b1f0c5fdb8d3740ef8" Nov 22 07:51:09 crc kubenswrapper[4929]: I1122 07:51:09.942249 4929 scope.go:117] "RemoveContainer" containerID="4af3f6661afeac3bd634a380552e2a22b3c963c40c8d2721376c5cf67ea2ad45" Nov 22 07:51:09 crc kubenswrapper[4929]: I1122 07:51:09.979181 4929 scope.go:117] "RemoveContainer" containerID="c8153cd03e5828456a0438210663b0626a9287f391dd96f1367150607ed8e9ca" Nov 22 07:51:10 crc 
kubenswrapper[4929]: I1122 07:51:10.009175 4929 scope.go:117] "RemoveContainer" containerID="dd0956e821e6f19d11fb5cbf2c6af0f22aa842ff6dbf890895e4fbf64181588e" Nov 22 07:51:10 crc kubenswrapper[4929]: I1122 07:51:10.108819 4929 scope.go:117] "RemoveContainer" containerID="b980a08b1ce359fe5972912ad25bffd1d5cc9683e1b6d780ac37576e9a24439f" Nov 22 07:51:10 crc kubenswrapper[4929]: I1122 07:51:10.131888 4929 scope.go:117] "RemoveContainer" containerID="d814fab54abebc0695b9ebfc9536715873cfb3bc9355b8855fb00f4bd8f7acad" Nov 22 07:51:10 crc kubenswrapper[4929]: I1122 07:51:10.149311 4929 scope.go:117] "RemoveContainer" containerID="c8831cd02094c059d64e2b0b73727f8a38e750042b399fae2589c9c1ef247f6e" Nov 22 07:51:10 crc kubenswrapper[4929]: I1122 07:51:10.168292 4929 scope.go:117] "RemoveContainer" containerID="78fd18e6de2c93f1ef21e8e3fbe08d40ffb8d363a02ef32525a8195ddc90ec18" Nov 22 07:51:10 crc kubenswrapper[4929]: I1122 07:51:10.186609 4929 scope.go:117] "RemoveContainer" containerID="df53ad4ef7cc65b0c3079a17dc182f1fb4c41565d2152189f330e4cad1788a74" Nov 22 07:51:18 crc kubenswrapper[4929]: I1122 07:51:18.594874 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:51:18 crc kubenswrapper[4929]: I1122 07:51:18.595700 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:51:48 crc kubenswrapper[4929]: I1122 07:51:48.594425 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 07:51:48 crc kubenswrapper[4929]: I1122 07:51:48.595043 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 07:51:48 crc kubenswrapper[4929]: I1122 07:51:48.595092 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 07:51:48 crc kubenswrapper[4929]: I1122 07:51:48.595965 4929 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c"} pod="openshift-machine-config-operator/machine-config-daemon-dssfx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 07:51:48 crc kubenswrapper[4929]: I1122 07:51:48.596028 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" 
containerID="cri-o://b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" gracePeriod=600 Nov 22 07:51:49 crc kubenswrapper[4929]: E1122 07:51:49.597125 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:51:49 crc kubenswrapper[4929]: I1122 07:51:49.612940 4929 generic.go:334] "Generic (PLEG): container finished" podID="470531cb-120c-48d9-80e1-adf074cf3055" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" exitCode=0 Nov 22 07:51:49 crc kubenswrapper[4929]: I1122 07:51:49.612982 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerDied","Data":"b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c"} Nov 22 07:51:49 crc kubenswrapper[4929]: I1122 07:51:49.613014 4929 scope.go:117] "RemoveContainer" containerID="da6298c3cdc9e063ad233859e80f7899c9d155db55931a04a3aa81f018fc8e1e" Nov 22 07:51:49 crc kubenswrapper[4929]: I1122 07:51:49.613749 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" Nov 22 07:51:49 crc kubenswrapper[4929]: E1122 07:51:49.614087 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:52:00 crc kubenswrapper[4929]: I1122 07:52:00.947830 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" Nov 22 07:52:00 crc kubenswrapper[4929]: E1122 07:52:00.948806 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:52:08 crc kubenswrapper[4929]: I1122 07:52:08.040658 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-sync-qnmbh"] Nov 22 07:52:08 crc kubenswrapper[4929]: I1122 07:52:08.049861 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-sync-qnmbh"] Nov 22 07:52:09 crc kubenswrapper[4929]: I1122 07:52:09.961590 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee12ebde-6711-471d-b83c-649a0523ce63" path="/var/lib/kubelet/pods/ee12ebde-6711-471d-b83c-649a0523ce63/volumes" Nov 22 07:52:10 crc kubenswrapper[4929]: I1122 07:52:10.536897 4929 scope.go:117] "RemoveContainer" containerID="2540f1d1957de2e49d4a114ab9af4ada0b4550fe4476d830c90becc2d4907b2f" Nov 22 07:52:15 crc kubenswrapper[4929]: I1122 07:52:15.028706 4929 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openstack/keystone-db-sync-7tkl7"] Nov 22 07:52:15 crc kubenswrapper[4929]: I1122 07:52:15.038672 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-7tkl7"] Nov 22 07:52:15 crc kubenswrapper[4929]: I1122 07:52:15.948718 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" Nov 22 07:52:15 crc kubenswrapper[4929]: E1122 07:52:15.949522 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:52:15 crc kubenswrapper[4929]: I1122 07:52:15.979109 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f311300e-5d96-4731-99a1-1e072280db75" path="/var/lib/kubelet/pods/f311300e-5d96-4731-99a1-1e072280db75/volumes" Nov 22 07:52:29 crc kubenswrapper[4929]: I1122 07:52:29.948038 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" Nov 22 07:52:29 crc kubenswrapper[4929]: E1122 07:52:29.949553 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:52:31 crc kubenswrapper[4929]: I1122 07:52:31.038903 4929 generic.go:334] "Generic (PLEG): container finished" podID="f3c0d097-15a2-494f-a1cc-2bde685efa87" containerID="4a0a38ab0e02d62955d90b79852d2db15e898b47138169421d7c158ef27aa7be" exitCode=0 Nov 22 07:52:31 crc kubenswrapper[4929]: I1122 07:52:31.038981 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-s4kv5" event={"ID":"f3c0d097-15a2-494f-a1cc-2bde685efa87","Type":"ContainerDied","Data":"4a0a38ab0e02d62955d90b79852d2db15e898b47138169421d7c158ef27aa7be"} Nov 22 07:52:32 crc kubenswrapper[4929]: I1122 07:52:32.378642 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-s4kv5" Nov 22 07:52:32 crc kubenswrapper[4929]: I1122 07:52:32.495573 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3c0d097-15a2-494f-a1cc-2bde685efa87-scripts\") pod \"f3c0d097-15a2-494f-a1cc-2bde685efa87\" (UID: \"f3c0d097-15a2-494f-a1cc-2bde685efa87\") " Nov 22 07:52:32 crc kubenswrapper[4929]: I1122 07:52:32.495645 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8mhr\" (UniqueName: \"kubernetes.io/projected/f3c0d097-15a2-494f-a1cc-2bde685efa87-kube-api-access-w8mhr\") pod \"f3c0d097-15a2-494f-a1cc-2bde685efa87\" (UID: \"f3c0d097-15a2-494f-a1cc-2bde685efa87\") " Nov 22 07:52:32 crc kubenswrapper[4929]: I1122 07:52:32.495795 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3c0d097-15a2-494f-a1cc-2bde685efa87-combined-ca-bundle\") pod \"f3c0d097-15a2-494f-a1cc-2bde685efa87\" (UID: \"f3c0d097-15a2-494f-a1cc-2bde685efa87\") " Nov 22 07:52:32 crc kubenswrapper[4929]: I1122 07:52:32.495892 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3c0d097-15a2-494f-a1cc-2bde685efa87-config-data\") pod \"f3c0d097-15a2-494f-a1cc-2bde685efa87\" (UID: \"f3c0d097-15a2-494f-a1cc-2bde685efa87\") " Nov 22 07:52:32 crc kubenswrapper[4929]: I1122 07:52:32.501079 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3c0d097-15a2-494f-a1cc-2bde685efa87-scripts" (OuterVolumeSpecName: "scripts") pod "f3c0d097-15a2-494f-a1cc-2bde685efa87" (UID: "f3c0d097-15a2-494f-a1cc-2bde685efa87"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:52:32 crc kubenswrapper[4929]: I1122 07:52:32.503474 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3c0d097-15a2-494f-a1cc-2bde685efa87-kube-api-access-w8mhr" (OuterVolumeSpecName: "kube-api-access-w8mhr") pod "f3c0d097-15a2-494f-a1cc-2bde685efa87" (UID: "f3c0d097-15a2-494f-a1cc-2bde685efa87"). InnerVolumeSpecName "kube-api-access-w8mhr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:52:32 crc kubenswrapper[4929]: I1122 07:52:32.535360 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3c0d097-15a2-494f-a1cc-2bde685efa87-config-data" (OuterVolumeSpecName: "config-data") pod "f3c0d097-15a2-494f-a1cc-2bde685efa87" (UID: "f3c0d097-15a2-494f-a1cc-2bde685efa87"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:52:32 crc kubenswrapper[4929]: I1122 07:52:32.535732 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3c0d097-15a2-494f-a1cc-2bde685efa87-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f3c0d097-15a2-494f-a1cc-2bde685efa87" (UID: "f3c0d097-15a2-494f-a1cc-2bde685efa87"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:52:32 crc kubenswrapper[4929]: I1122 07:52:32.598483 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3c0d097-15a2-494f-a1cc-2bde685efa87-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:32 crc kubenswrapper[4929]: I1122 07:52:32.598517 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3c0d097-15a2-494f-a1cc-2bde685efa87-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:32 crc kubenswrapper[4929]: I1122 07:52:32.598526 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8mhr\" (UniqueName: \"kubernetes.io/projected/f3c0d097-15a2-494f-a1cc-2bde685efa87-kube-api-access-w8mhr\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:32 crc kubenswrapper[4929]: I1122 07:52:32.598535 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3c0d097-15a2-494f-a1cc-2bde685efa87-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.058449 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-s4kv5" event={"ID":"f3c0d097-15a2-494f-a1cc-2bde685efa87","Type":"ContainerDied","Data":"c80a247f50a30207063b71399a4b44a6efee811f2b036d38cf66b06a73dcab0a"} Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.058755 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c80a247f50a30207063b71399a4b44a6efee811f2b036d38cf66b06a73dcab0a" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.058512 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-s4kv5" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.155162 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 22 07:52:33 crc kubenswrapper[4929]: E1122 07:52:33.155564 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd35db38-a8f4-40d9-805b-af72be930c0a" containerName="extract-content" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.155587 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd35db38-a8f4-40d9-805b-af72be930c0a" containerName="extract-content" Nov 22 07:52:33 crc kubenswrapper[4929]: E1122 07:52:33.155607 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb865101-8d1a-4930-b37d-73df5d40e618" containerName="extract-utilities" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.155614 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb865101-8d1a-4930-b37d-73df5d40e618" containerName="extract-utilities" Nov 22 07:52:33 crc kubenswrapper[4929]: E1122 07:52:33.155623 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb865101-8d1a-4930-b37d-73df5d40e618" containerName="extract-content" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.155632 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb865101-8d1a-4930-b37d-73df5d40e618" containerName="extract-content" Nov 22 07:52:33 crc kubenswrapper[4929]: E1122 07:52:33.155642 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd35db38-a8f4-40d9-805b-af72be930c0a" containerName="registry-server" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.155649 4929 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="dd35db38-a8f4-40d9-805b-af72be930c0a" containerName="registry-server" Nov 22 07:52:33 crc kubenswrapper[4929]: E1122 07:52:33.155668 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="680b78e9-161f-47a3-8fe8-39fc6602b0f7" containerName="extract-utilities" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.155675 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="680b78e9-161f-47a3-8fe8-39fc6602b0f7" containerName="extract-utilities" Nov 22 07:52:33 crc kubenswrapper[4929]: E1122 07:52:33.155686 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd35db38-a8f4-40d9-805b-af72be930c0a" containerName="extract-utilities" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.155694 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd35db38-a8f4-40d9-805b-af72be930c0a" containerName="extract-utilities" Nov 22 07:52:33 crc kubenswrapper[4929]: E1122 07:52:33.155706 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bcb575a-cde1-458f-85d0-17ab7cf60522" containerName="extract-content" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.155713 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bcb575a-cde1-458f-85d0-17ab7cf60522" containerName="extract-content" Nov 22 07:52:33 crc kubenswrapper[4929]: E1122 07:52:33.155738 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bcb575a-cde1-458f-85d0-17ab7cf60522" containerName="registry-server" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.155746 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bcb575a-cde1-458f-85d0-17ab7cf60522" containerName="registry-server" Nov 22 07:52:33 crc kubenswrapper[4929]: E1122 07:52:33.155758 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="680b78e9-161f-47a3-8fe8-39fc6602b0f7" containerName="extract-content" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.155764 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="680b78e9-161f-47a3-8fe8-39fc6602b0f7" containerName="extract-content" Nov 22 07:52:33 crc kubenswrapper[4929]: E1122 07:52:33.155780 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3c0d097-15a2-494f-a1cc-2bde685efa87" containerName="nova-cell0-conductor-db-sync" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.155786 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3c0d097-15a2-494f-a1cc-2bde685efa87" containerName="nova-cell0-conductor-db-sync" Nov 22 07:52:33 crc kubenswrapper[4929]: E1122 07:52:33.155799 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb865101-8d1a-4930-b37d-73df5d40e618" containerName="registry-server" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.155806 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb865101-8d1a-4930-b37d-73df5d40e618" containerName="registry-server" Nov 22 07:52:33 crc kubenswrapper[4929]: E1122 07:52:33.155818 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="680b78e9-161f-47a3-8fe8-39fc6602b0f7" containerName="registry-server" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.155824 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="680b78e9-161f-47a3-8fe8-39fc6602b0f7" containerName="registry-server" Nov 22 07:52:33 crc kubenswrapper[4929]: E1122 07:52:33.155838 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bcb575a-cde1-458f-85d0-17ab7cf60522" containerName="extract-utilities" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 
07:52:33.155850 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bcb575a-cde1-458f-85d0-17ab7cf60522" containerName="extract-utilities" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.156059 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd35db38-a8f4-40d9-805b-af72be930c0a" containerName="registry-server" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.156072 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb865101-8d1a-4930-b37d-73df5d40e618" containerName="registry-server" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.156085 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3c0d097-15a2-494f-a1cc-2bde685efa87" containerName="nova-cell0-conductor-db-sync" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.156095 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="9bcb575a-cde1-458f-85d0-17ab7cf60522" containerName="registry-server" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.156112 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="680b78e9-161f-47a3-8fe8-39fc6602b0f7" containerName="registry-server" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.156854 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.159463 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-mffzz" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.166269 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.167344 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.210811 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75239a0a-1b24-4e70-bea0-1e9a708f72fe-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"75239a0a-1b24-4e70-bea0-1e9a708f72fe\") " pod="openstack/nova-cell0-conductor-0" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.210889 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzgfx\" (UniqueName: \"kubernetes.io/projected/75239a0a-1b24-4e70-bea0-1e9a708f72fe-kube-api-access-kzgfx\") pod \"nova-cell0-conductor-0\" (UID: \"75239a0a-1b24-4e70-bea0-1e9a708f72fe\") " pod="openstack/nova-cell0-conductor-0" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.211091 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75239a0a-1b24-4e70-bea0-1e9a708f72fe-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"75239a0a-1b24-4e70-bea0-1e9a708f72fe\") " pod="openstack/nova-cell0-conductor-0" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.313402 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75239a0a-1b24-4e70-bea0-1e9a708f72fe-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"75239a0a-1b24-4e70-bea0-1e9a708f72fe\") " pod="openstack/nova-cell0-conductor-0" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.313463 4929 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-kzgfx\" (UniqueName: \"kubernetes.io/projected/75239a0a-1b24-4e70-bea0-1e9a708f72fe-kube-api-access-kzgfx\") pod \"nova-cell0-conductor-0\" (UID: \"75239a0a-1b24-4e70-bea0-1e9a708f72fe\") " pod="openstack/nova-cell0-conductor-0" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.313570 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75239a0a-1b24-4e70-bea0-1e9a708f72fe-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"75239a0a-1b24-4e70-bea0-1e9a708f72fe\") " pod="openstack/nova-cell0-conductor-0" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.324090 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75239a0a-1b24-4e70-bea0-1e9a708f72fe-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"75239a0a-1b24-4e70-bea0-1e9a708f72fe\") " pod="openstack/nova-cell0-conductor-0" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.324098 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75239a0a-1b24-4e70-bea0-1e9a708f72fe-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"75239a0a-1b24-4e70-bea0-1e9a708f72fe\") " pod="openstack/nova-cell0-conductor-0" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.329822 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzgfx\" (UniqueName: \"kubernetes.io/projected/75239a0a-1b24-4e70-bea0-1e9a708f72fe-kube-api-access-kzgfx\") pod \"nova-cell0-conductor-0\" (UID: \"75239a0a-1b24-4e70-bea0-1e9a708f72fe\") " pod="openstack/nova-cell0-conductor-0" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.475974 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 22 07:52:33 crc kubenswrapper[4929]: I1122 07:52:33.919854 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 22 07:52:34 crc kubenswrapper[4929]: I1122 07:52:34.071111 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"75239a0a-1b24-4e70-bea0-1e9a708f72fe","Type":"ContainerStarted","Data":"a8c61050a48c28f79b577d8714628913589d257bef7228d5d1bb704a992de30d"} Nov 22 07:52:35 crc kubenswrapper[4929]: I1122 07:52:35.080657 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"75239a0a-1b24-4e70-bea0-1e9a708f72fe","Type":"ContainerStarted","Data":"c7fbb594ec3fab471f293d07910d36cee023ae429daf31ba5c09f10bb448b578"} Nov 22 07:52:35 crc kubenswrapper[4929]: I1122 07:52:35.081022 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 22 07:52:35 crc kubenswrapper[4929]: I1122 07:52:35.104232 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.104184012 podStartE2EDuration="2.104184012s" podCreationTimestamp="2025-11-22 07:52:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:52:35.093182408 +0000 UTC m=+2492.202636441" watchObservedRunningTime="2025-11-22 07:52:35.104184012 +0000 UTC m=+2492.213638035" Nov 22 07:52:43 crc kubenswrapper[4929]: I1122 07:52:43.509921 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.036989 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-bs8nq"] Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.040855 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-bs8nq" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.042498 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.042863 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.049631 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-bs8nq"] Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.133348 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-scripts\") pod \"nova-cell0-cell-mapping-bs8nq\" (UID: \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\") " pod="openstack/nova-cell0-cell-mapping-bs8nq" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.133411 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-md7dp\" (UniqueName: \"kubernetes.io/projected/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-kube-api-access-md7dp\") pod \"nova-cell0-cell-mapping-bs8nq\" (UID: \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\") " pod="openstack/nova-cell0-cell-mapping-bs8nq" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.133559 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-config-data\") pod \"nova-cell0-cell-mapping-bs8nq\" (UID: \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\") " pod="openstack/nova-cell0-cell-mapping-bs8nq" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.133593 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-bs8nq\" (UID: \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\") " pod="openstack/nova-cell0-cell-mapping-bs8nq" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.226875 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.228098 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.230203 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.236828 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-config-data\") pod \"nova-cell0-cell-mapping-bs8nq\" (UID: \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\") " pod="openstack/nova-cell0-cell-mapping-bs8nq" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.236875 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-bs8nq\" (UID: \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\") " pod="openstack/nova-cell0-cell-mapping-bs8nq" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.236955 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-scripts\") pod \"nova-cell0-cell-mapping-bs8nq\" (UID: \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\") " pod="openstack/nova-cell0-cell-mapping-bs8nq" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.236991 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-md7dp\" (UniqueName: \"kubernetes.io/projected/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-kube-api-access-md7dp\") pod \"nova-cell0-cell-mapping-bs8nq\" (UID: \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\") " pod="openstack/nova-cell0-cell-mapping-bs8nq" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.245336 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-bs8nq\" (UID: \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\") " pod="openstack/nova-cell0-cell-mapping-bs8nq" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.245979 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-scripts\") pod \"nova-cell0-cell-mapping-bs8nq\" (UID: \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\") " pod="openstack/nova-cell0-cell-mapping-bs8nq" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.249880 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-config-data\") pod \"nova-cell0-cell-mapping-bs8nq\" (UID: \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\") " pod="openstack/nova-cell0-cell-mapping-bs8nq" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.257717 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.278284 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.280094 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.285655 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-md7dp\" (UniqueName: \"kubernetes.io/projected/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-kube-api-access-md7dp\") pod \"nova-cell0-cell-mapping-bs8nq\" (UID: \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\") " pod="openstack/nova-cell0-cell-mapping-bs8nq" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.292614 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.300165 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.340161 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06a554db-ef62-42bb-955a-a0e69dbc575d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"06a554db-ef62-42bb-955a-a0e69dbc575d\") " pod="openstack/nova-api-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.340546 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/06a554db-ef62-42bb-955a-a0e69dbc575d-logs\") pod \"nova-api-0\" (UID: \"06a554db-ef62-42bb-955a-a0e69dbc575d\") " pod="openstack/nova-api-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.340901 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dp5cn\" (UniqueName: \"kubernetes.io/projected/303b60e5-e78f-4519-a766-069904f135a8-kube-api-access-dp5cn\") pod \"nova-scheduler-0\" (UID: \"303b60e5-e78f-4519-a766-069904f135a8\") " pod="openstack/nova-scheduler-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.340945 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/303b60e5-e78f-4519-a766-069904f135a8-config-data\") pod \"nova-scheduler-0\" (UID: \"303b60e5-e78f-4519-a766-069904f135a8\") " pod="openstack/nova-scheduler-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.340990 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdphl\" (UniqueName: \"kubernetes.io/projected/06a554db-ef62-42bb-955a-a0e69dbc575d-kube-api-access-pdphl\") pod \"nova-api-0\" (UID: \"06a554db-ef62-42bb-955a-a0e69dbc575d\") " pod="openstack/nova-api-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.341024 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06a554db-ef62-42bb-955a-a0e69dbc575d-config-data\") pod \"nova-api-0\" (UID: \"06a554db-ef62-42bb-955a-a0e69dbc575d\") " pod="openstack/nova-api-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.341083 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/303b60e5-e78f-4519-a766-069904f135a8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"303b60e5-e78f-4519-a766-069904f135a8\") " pod="openstack/nova-scheduler-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.372970 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-bs8nq" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.379804 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.383135 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.397170 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.423139 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.442523 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-config-data\") pod \"nova-metadata-0\" (UID: \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\") " pod="openstack/nova-metadata-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.442593 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\") " pod="openstack/nova-metadata-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.442613 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-logs\") pod \"nova-metadata-0\" (UID: \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\") " pod="openstack/nova-metadata-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.442647 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06a554db-ef62-42bb-955a-a0e69dbc575d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"06a554db-ef62-42bb-955a-a0e69dbc575d\") " pod="openstack/nova-api-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.442679 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/06a554db-ef62-42bb-955a-a0e69dbc575d-logs\") pod \"nova-api-0\" (UID: \"06a554db-ef62-42bb-955a-a0e69dbc575d\") " pod="openstack/nova-api-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.442719 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqg2l\" (UniqueName: \"kubernetes.io/projected/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-kube-api-access-cqg2l\") pod \"nova-metadata-0\" (UID: \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\") " pod="openstack/nova-metadata-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.442749 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dp5cn\" (UniqueName: \"kubernetes.io/projected/303b60e5-e78f-4519-a766-069904f135a8-kube-api-access-dp5cn\") pod \"nova-scheduler-0\" (UID: \"303b60e5-e78f-4519-a766-069904f135a8\") " pod="openstack/nova-scheduler-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.442771 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/303b60e5-e78f-4519-a766-069904f135a8-config-data\") pod 
\"nova-scheduler-0\" (UID: \"303b60e5-e78f-4519-a766-069904f135a8\") " pod="openstack/nova-scheduler-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.442805 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdphl\" (UniqueName: \"kubernetes.io/projected/06a554db-ef62-42bb-955a-a0e69dbc575d-kube-api-access-pdphl\") pod \"nova-api-0\" (UID: \"06a554db-ef62-42bb-955a-a0e69dbc575d\") " pod="openstack/nova-api-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.442831 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06a554db-ef62-42bb-955a-a0e69dbc575d-config-data\") pod \"nova-api-0\" (UID: \"06a554db-ef62-42bb-955a-a0e69dbc575d\") " pod="openstack/nova-api-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.442876 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/303b60e5-e78f-4519-a766-069904f135a8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"303b60e5-e78f-4519-a766-069904f135a8\") " pod="openstack/nova-scheduler-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.444132 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/06a554db-ef62-42bb-955a-a0e69dbc575d-logs\") pod \"nova-api-0\" (UID: \"06a554db-ef62-42bb-955a-a0e69dbc575d\") " pod="openstack/nova-api-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.448869 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/303b60e5-e78f-4519-a766-069904f135a8-config-data\") pod \"nova-scheduler-0\" (UID: \"303b60e5-e78f-4519-a766-069904f135a8\") " pod="openstack/nova-scheduler-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.449549 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06a554db-ef62-42bb-955a-a0e69dbc575d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"06a554db-ef62-42bb-955a-a0e69dbc575d\") " pod="openstack/nova-api-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.450151 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06a554db-ef62-42bb-955a-a0e69dbc575d-config-data\") pod \"nova-api-0\" (UID: \"06a554db-ef62-42bb-955a-a0e69dbc575d\") " pod="openstack/nova-api-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.451407 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/303b60e5-e78f-4519-a766-069904f135a8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"303b60e5-e78f-4519-a766-069904f135a8\") " pod="openstack/nova-scheduler-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.487352 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.489310 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.502580 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dp5cn\" (UniqueName: \"kubernetes.io/projected/303b60e5-e78f-4519-a766-069904f135a8-kube-api-access-dp5cn\") pod \"nova-scheduler-0\" (UID: \"303b60e5-e78f-4519-a766-069904f135a8\") " pod="openstack/nova-scheduler-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.504511 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.521282 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdphl\" (UniqueName: \"kubernetes.io/projected/06a554db-ef62-42bb-955a-a0e69dbc575d-kube-api-access-pdphl\") pod \"nova-api-0\" (UID: \"06a554db-ef62-42bb-955a-a0e69dbc575d\") " pod="openstack/nova-api-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.525888 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.544447 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5wc9\" (UniqueName: \"kubernetes.io/projected/00efdb20-390d-4535-b655-232832a018a6-kube-api-access-x5wc9\") pod \"nova-cell1-novncproxy-0\" (UID: \"00efdb20-390d-4535-b655-232832a018a6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.544528 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-config-data\") pod \"nova-metadata-0\" (UID: \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\") " pod="openstack/nova-metadata-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.544547 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00efdb20-390d-4535-b655-232832a018a6-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"00efdb20-390d-4535-b655-232832a018a6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.544577 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\") " pod="openstack/nova-metadata-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.544596 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-logs\") pod \"nova-metadata-0\" (UID: \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\") " pod="openstack/nova-metadata-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.544656 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqg2l\" (UniqueName: \"kubernetes.io/projected/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-kube-api-access-cqg2l\") pod \"nova-metadata-0\" (UID: \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\") " pod="openstack/nova-metadata-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.544687 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/00efdb20-390d-4535-b655-232832a018a6-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"00efdb20-390d-4535-b655-232832a018a6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.549122 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-logs\") pod \"nova-metadata-0\" (UID: \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\") " pod="openstack/nova-metadata-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.550038 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\") " pod="openstack/nova-metadata-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.559397 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-config-data\") pod \"nova-metadata-0\" (UID: \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\") " pod="openstack/nova-metadata-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.571463 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqg2l\" (UniqueName: \"kubernetes.io/projected/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-kube-api-access-cqg2l\") pod \"nova-metadata-0\" (UID: \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\") " pod="openstack/nova-metadata-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.580347 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78d445889f-78pqd"] Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.582859 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.619155 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78d445889f-78pqd"] Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.652524 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00efdb20-390d-4535-b655-232832a018a6-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"00efdb20-390d-4535-b655-232832a018a6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.652593 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-ovsdbserver-sb\") pod \"dnsmasq-dns-78d445889f-78pqd\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.652621 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-dns-swift-storage-0\") pod \"dnsmasq-dns-78d445889f-78pqd\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.652701 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-dns-svc\") pod \"dnsmasq-dns-78d445889f-78pqd\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.652723 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-ovsdbserver-nb\") pod \"dnsmasq-dns-78d445889f-78pqd\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.652751 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-config\") pod \"dnsmasq-dns-78d445889f-78pqd\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.652771 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00efdb20-390d-4535-b655-232832a018a6-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"00efdb20-390d-4535-b655-232832a018a6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.652848 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4js85\" (UniqueName: \"kubernetes.io/projected/bd83a57e-98a0-42fe-9e71-0f9ecde27095-kube-api-access-4js85\") pod \"dnsmasq-dns-78d445889f-78pqd\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.652871 4929 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-x5wc9\" (UniqueName: \"kubernetes.io/projected/00efdb20-390d-4535-b655-232832a018a6-kube-api-access-x5wc9\") pod \"nova-cell1-novncproxy-0\" (UID: \"00efdb20-390d-4535-b655-232832a018a6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.653422 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.658526 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00efdb20-390d-4535-b655-232832a018a6-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"00efdb20-390d-4535-b655-232832a018a6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.658660 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00efdb20-390d-4535-b655-232832a018a6-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"00efdb20-390d-4535-b655-232832a018a6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.673672 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.675725 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5wc9\" (UniqueName: \"kubernetes.io/projected/00efdb20-390d-4535-b655-232832a018a6-kube-api-access-x5wc9\") pod \"nova-cell1-novncproxy-0\" (UID: \"00efdb20-390d-4535-b655-232832a018a6\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.759973 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-ovsdbserver-nb\") pod \"dnsmasq-dns-78d445889f-78pqd\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.760024 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-config\") pod \"dnsmasq-dns-78d445889f-78pqd\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.760097 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4js85\" (UniqueName: \"kubernetes.io/projected/bd83a57e-98a0-42fe-9e71-0f9ecde27095-kube-api-access-4js85\") pod \"dnsmasq-dns-78d445889f-78pqd\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.760175 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-ovsdbserver-sb\") pod \"dnsmasq-dns-78d445889f-78pqd\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.760196 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-dns-swift-storage-0\") pod 
\"dnsmasq-dns-78d445889f-78pqd\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.760274 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-dns-svc\") pod \"dnsmasq-dns-78d445889f-78pqd\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.761180 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-dns-svc\") pod \"dnsmasq-dns-78d445889f-78pqd\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.761608 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-ovsdbserver-nb\") pod \"dnsmasq-dns-78d445889f-78pqd\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.762013 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-config\") pod \"dnsmasq-dns-78d445889f-78pqd\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.762359 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-dns-swift-storage-0\") pod \"dnsmasq-dns-78d445889f-78pqd\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.763289 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-ovsdbserver-sb\") pod \"dnsmasq-dns-78d445889f-78pqd\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.783647 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4js85\" (UniqueName: \"kubernetes.io/projected/bd83a57e-98a0-42fe-9e71-0f9ecde27095-kube-api-access-4js85\") pod \"dnsmasq-dns-78d445889f-78pqd\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.857555 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.882740 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.931884 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:44 crc kubenswrapper[4929]: I1122 07:52:44.947258 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" Nov 22 07:52:44 crc kubenswrapper[4929]: E1122 07:52:44.947530 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:52:45 crc kubenswrapper[4929]: I1122 07:52:45.100606 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-bs8nq"] Nov 22 07:52:45 crc kubenswrapper[4929]: I1122 07:52:45.125920 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 07:52:45 crc kubenswrapper[4929]: I1122 07:52:45.206800 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"303b60e5-e78f-4519-a766-069904f135a8","Type":"ContainerStarted","Data":"6235d433bb6a3e81ae61144de5f66f5f8036fbca7edbf39dd03bc760517f2fab"} Nov 22 07:52:45 crc kubenswrapper[4929]: I1122 07:52:45.216747 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-bs8nq" event={"ID":"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa","Type":"ContainerStarted","Data":"590513f2d30dc6af9f15385d80edef79058389347f0e78c17ce49821831effad"} Nov 22 07:52:45 crc kubenswrapper[4929]: W1122 07:52:45.384942 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod06a554db_ef62_42bb_955a_a0e69dbc575d.slice/crio-bb903b44fa33f1c87ae43515d7f23c398b3bbe170c31bbec14024a97f8b3207e WatchSource:0}: Error finding container bb903b44fa33f1c87ae43515d7f23c398b3bbe170c31bbec14024a97f8b3207e: Status 404 returned error can't find the container with id bb903b44fa33f1c87ae43515d7f23c398b3bbe170c31bbec14024a97f8b3207e Nov 22 07:52:45 crc kubenswrapper[4929]: I1122 07:52:45.386547 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 07:52:45 crc kubenswrapper[4929]: I1122 07:52:45.640511 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 07:52:45 crc kubenswrapper[4929]: I1122 07:52:45.652028 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:52:45 crc kubenswrapper[4929]: W1122 07:52:45.743711 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd83a57e_98a0_42fe_9e71_0f9ecde27095.slice/crio-2e6b80ca131422e907bdc9e2ce1e688f77fbafe024e072cd5f3b5413825c24af WatchSource:0}: Error finding container 2e6b80ca131422e907bdc9e2ce1e688f77fbafe024e072cd5f3b5413825c24af: Status 404 returned error can't find the container with id 2e6b80ca131422e907bdc9e2ce1e688f77fbafe024e072cd5f3b5413825c24af Nov 22 07:52:45 crc kubenswrapper[4929]: I1122 07:52:45.749437 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78d445889f-78pqd"] Nov 22 07:52:45 crc kubenswrapper[4929]: I1122 07:52:45.813646 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-tf9kg"] Nov 22 
07:52:45 crc kubenswrapper[4929]: I1122 07:52:45.815048 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-tf9kg" Nov 22 07:52:45 crc kubenswrapper[4929]: I1122 07:52:45.824168 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-tf9kg"] Nov 22 07:52:45 crc kubenswrapper[4929]: I1122 07:52:45.828427 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 22 07:52:45 crc kubenswrapper[4929]: I1122 07:52:45.828609 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 22 07:52:45 crc kubenswrapper[4929]: I1122 07:52:45.992091 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-config-data\") pod \"nova-cell1-conductor-db-sync-tf9kg\" (UID: \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\") " pod="openstack/nova-cell1-conductor-db-sync-tf9kg" Nov 22 07:52:45 crc kubenswrapper[4929]: I1122 07:52:45.992152 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wl5jb\" (UniqueName: \"kubernetes.io/projected/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-kube-api-access-wl5jb\") pod \"nova-cell1-conductor-db-sync-tf9kg\" (UID: \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\") " pod="openstack/nova-cell1-conductor-db-sync-tf9kg" Nov 22 07:52:45 crc kubenswrapper[4929]: I1122 07:52:45.992451 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-scripts\") pod \"nova-cell1-conductor-db-sync-tf9kg\" (UID: \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\") " pod="openstack/nova-cell1-conductor-db-sync-tf9kg" Nov 22 07:52:45 crc kubenswrapper[4929]: I1122 07:52:45.992480 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-tf9kg\" (UID: \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\") " pod="openstack/nova-cell1-conductor-db-sync-tf9kg" Nov 22 07:52:46 crc kubenswrapper[4929]: I1122 07:52:46.095841 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-scripts\") pod \"nova-cell1-conductor-db-sync-tf9kg\" (UID: \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\") " pod="openstack/nova-cell1-conductor-db-sync-tf9kg" Nov 22 07:52:46 crc kubenswrapper[4929]: I1122 07:52:46.095911 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-tf9kg\" (UID: \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\") " pod="openstack/nova-cell1-conductor-db-sync-tf9kg" Nov 22 07:52:46 crc kubenswrapper[4929]: I1122 07:52:46.095993 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-config-data\") pod \"nova-cell1-conductor-db-sync-tf9kg\" (UID: \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\") " 
pod="openstack/nova-cell1-conductor-db-sync-tf9kg" Nov 22 07:52:46 crc kubenswrapper[4929]: I1122 07:52:46.096025 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wl5jb\" (UniqueName: \"kubernetes.io/projected/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-kube-api-access-wl5jb\") pod \"nova-cell1-conductor-db-sync-tf9kg\" (UID: \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\") " pod="openstack/nova-cell1-conductor-db-sync-tf9kg" Nov 22 07:52:46 crc kubenswrapper[4929]: I1122 07:52:46.100141 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-scripts\") pod \"nova-cell1-conductor-db-sync-tf9kg\" (UID: \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\") " pod="openstack/nova-cell1-conductor-db-sync-tf9kg" Nov 22 07:52:46 crc kubenswrapper[4929]: I1122 07:52:46.100949 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-tf9kg\" (UID: \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\") " pod="openstack/nova-cell1-conductor-db-sync-tf9kg" Nov 22 07:52:46 crc kubenswrapper[4929]: I1122 07:52:46.100958 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-config-data\") pod \"nova-cell1-conductor-db-sync-tf9kg\" (UID: \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\") " pod="openstack/nova-cell1-conductor-db-sync-tf9kg" Nov 22 07:52:46 crc kubenswrapper[4929]: I1122 07:52:46.114333 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wl5jb\" (UniqueName: \"kubernetes.io/projected/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-kube-api-access-wl5jb\") pod \"nova-cell1-conductor-db-sync-tf9kg\" (UID: \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\") " pod="openstack/nova-cell1-conductor-db-sync-tf9kg" Nov 22 07:52:46 crc kubenswrapper[4929]: I1122 07:52:46.144706 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-tf9kg" Nov 22 07:52:46 crc kubenswrapper[4929]: I1122 07:52:46.228604 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-bs8nq" event={"ID":"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa","Type":"ContainerStarted","Data":"f740af74253fae41d9c7439cd2325c66c7cca6c486a28435a60a8232a2090e73"} Nov 22 07:52:46 crc kubenswrapper[4929]: I1122 07:52:46.237510 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"00efdb20-390d-4535-b655-232832a018a6","Type":"ContainerStarted","Data":"8da170caadc5f39e30d174541c5e41f4bd4ef511902260de374186ae78bafe5a"} Nov 22 07:52:46 crc kubenswrapper[4929]: I1122 07:52:46.242754 4929 generic.go:334] "Generic (PLEG): container finished" podID="bd83a57e-98a0-42fe-9e71-0f9ecde27095" containerID="5be8ccc49deb29bbf54fe7e8af8bb2f04560ab22fd48a75ab2123d404f9b950b" exitCode=0 Nov 22 07:52:46 crc kubenswrapper[4929]: I1122 07:52:46.242833 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78d445889f-78pqd" event={"ID":"bd83a57e-98a0-42fe-9e71-0f9ecde27095","Type":"ContainerDied","Data":"5be8ccc49deb29bbf54fe7e8af8bb2f04560ab22fd48a75ab2123d404f9b950b"} Nov 22 07:52:46 crc kubenswrapper[4929]: I1122 07:52:46.242865 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78d445889f-78pqd" event={"ID":"bd83a57e-98a0-42fe-9e71-0f9ecde27095","Type":"ContainerStarted","Data":"2e6b80ca131422e907bdc9e2ce1e688f77fbafe024e072cd5f3b5413825c24af"} Nov 22 07:52:46 crc kubenswrapper[4929]: I1122 07:52:46.247119 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0b453a1d-e70c-4726-af6c-8af76d9ec6f9","Type":"ContainerStarted","Data":"6f9313b0d89d502af7cd99ec42bdb99627351461ff9e8b4a02d7f05529f1ee13"} Nov 22 07:52:46 crc kubenswrapper[4929]: I1122 07:52:46.275166 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-bs8nq" podStartSLOduration=2.275138941 podStartE2EDuration="2.275138941s" podCreationTimestamp="2025-11-22 07:52:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:52:46.25540396 +0000 UTC m=+2503.364857993" watchObservedRunningTime="2025-11-22 07:52:46.275138941 +0000 UTC m=+2503.384593124" Nov 22 07:52:46 crc kubenswrapper[4929]: I1122 07:52:46.293783 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"06a554db-ef62-42bb-955a-a0e69dbc575d","Type":"ContainerStarted","Data":"bb903b44fa33f1c87ae43515d7f23c398b3bbe170c31bbec14024a97f8b3207e"} Nov 22 07:52:46 crc kubenswrapper[4929]: I1122 07:52:46.691426 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-tf9kg"] Nov 22 07:52:47 crc kubenswrapper[4929]: I1122 07:52:47.314138 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78d445889f-78pqd" event={"ID":"bd83a57e-98a0-42fe-9e71-0f9ecde27095","Type":"ContainerStarted","Data":"886f8c312bd53eecbb4a09e15aff8f6e7ae588655dc6f74e213a0045d8ced470"} Nov 22 07:52:47 crc kubenswrapper[4929]: I1122 07:52:47.314540 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:47 crc kubenswrapper[4929]: I1122 07:52:47.344131 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/dnsmasq-dns-78d445889f-78pqd" podStartSLOduration=3.344103552 podStartE2EDuration="3.344103552s" podCreationTimestamp="2025-11-22 07:52:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:52:47.335046056 +0000 UTC m=+2504.444500069" watchObservedRunningTime="2025-11-22 07:52:47.344103552 +0000 UTC m=+2504.453557565" Nov 22 07:52:47 crc kubenswrapper[4929]: I1122 07:52:47.989886 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:52:48 crc kubenswrapper[4929]: I1122 07:52:48.020919 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 07:52:48 crc kubenswrapper[4929]: I1122 07:52:48.325859 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-tf9kg" event={"ID":"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff","Type":"ContainerStarted","Data":"7be002588d7b43a9936513af2609b0249fd8948ccf523eb63d09f1f7cadc71ed"} Nov 22 07:52:49 crc kubenswrapper[4929]: I1122 07:52:49.337113 4929 generic.go:334] "Generic (PLEG): container finished" podID="ebe2ed5d-6475-409a-a3a9-9a47d3de685b" containerID="e1a366137201b6689cd0e75aadbbf83f57a48e308149db83354111c219e52e1d" exitCode=0 Nov 22 07:52:49 crc kubenswrapper[4929]: I1122 07:52:49.337320 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-tqf65" event={"ID":"ebe2ed5d-6475-409a-a3a9-9a47d3de685b","Type":"ContainerDied","Data":"e1a366137201b6689cd0e75aadbbf83f57a48e308149db83354111c219e52e1d"} Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.348552 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"303b60e5-e78f-4519-a766-069904f135a8","Type":"ContainerStarted","Data":"6020c730d65286b179e61c765d04cc5abad6d08ce7fdd4225a077646b32a1125"} Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.351274 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0b453a1d-e70c-4726-af6c-8af76d9ec6f9","Type":"ContainerStarted","Data":"8d0a93a1d63640e94a49d93f890e5a23ff529393c6abb74a2cab615ef1f5c0ae"} Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.351316 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0b453a1d-e70c-4726-af6c-8af76d9ec6f9","Type":"ContainerStarted","Data":"056ea8b9912aebc3d0962e98d8106826291c5834a2ed54d4a107ba82e6f19ad6"} Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.351416 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0b453a1d-e70c-4726-af6c-8af76d9ec6f9" containerName="nova-metadata-log" containerID="cri-o://056ea8b9912aebc3d0962e98d8106826291c5834a2ed54d4a107ba82e6f19ad6" gracePeriod=30 Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.351441 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0b453a1d-e70c-4726-af6c-8af76d9ec6f9" containerName="nova-metadata-metadata" containerID="cri-o://8d0a93a1d63640e94a49d93f890e5a23ff529393c6abb74a2cab615ef1f5c0ae" gracePeriod=30 Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.359875 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-tf9kg" 
event={"ID":"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff","Type":"ContainerStarted","Data":"3f06f4053b60d4e141beeb4898f5c10ca17684c7370470eab5853c918287e253"} Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.363295 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"06a554db-ef62-42bb-955a-a0e69dbc575d","Type":"ContainerStarted","Data":"bccb7b314a22afc8eb3ba6d55bab66fa969a59a322c8bf7de2c054058730179a"} Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.363349 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"06a554db-ef62-42bb-955a-a0e69dbc575d","Type":"ContainerStarted","Data":"31f8bc34e200d4b678359380d3aa9d758b1a4d18eab016df9b922a7be5e500cc"} Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.368851 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.156032818 podStartE2EDuration="6.368829779s" podCreationTimestamp="2025-11-22 07:52:44 +0000 UTC" firstStartedPulling="2025-11-22 07:52:45.142469596 +0000 UTC m=+2502.251923609" lastFinishedPulling="2025-11-22 07:52:49.355266557 +0000 UTC m=+2506.464720570" observedRunningTime="2025-11-22 07:52:50.365029945 +0000 UTC m=+2507.474483968" watchObservedRunningTime="2025-11-22 07:52:50.368829779 +0000 UTC m=+2507.478283792" Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.372862 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="00efdb20-390d-4535-b655-232832a018a6" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://3d98668cbe08f8b110f81b720e5f0533b67270d72c51cd732a8d4f4e0c512727" gracePeriod=30 Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.373158 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"00efdb20-390d-4535-b655-232832a018a6","Type":"ContainerStarted","Data":"3d98668cbe08f8b110f81b720e5f0533b67270d72c51cd732a8d4f4e0c512727"} Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.413376 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-tf9kg" podStartSLOduration=5.413355297 podStartE2EDuration="5.413355297s" podCreationTimestamp="2025-11-22 07:52:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:52:50.386087609 +0000 UTC m=+2507.495541642" watchObservedRunningTime="2025-11-22 07:52:50.413355297 +0000 UTC m=+2507.522809320" Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.415501 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.446067114 podStartE2EDuration="6.41549431s" podCreationTimestamp="2025-11-22 07:52:44 +0000 UTC" firstStartedPulling="2025-11-22 07:52:45.38698808 +0000 UTC m=+2502.496442093" lastFinishedPulling="2025-11-22 07:52:49.356415276 +0000 UTC m=+2506.465869289" observedRunningTime="2025-11-22 07:52:50.404131138 +0000 UTC m=+2507.513585151" watchObservedRunningTime="2025-11-22 07:52:50.41549431 +0000 UTC m=+2507.524948313" Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.435546 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.719600142 podStartE2EDuration="6.435520379s" podCreationTimestamp="2025-11-22 07:52:44 +0000 UTC" firstStartedPulling="2025-11-22 
07:52:45.64458477 +0000 UTC m=+2502.754038783" lastFinishedPulling="2025-11-22 07:52:49.360505007 +0000 UTC m=+2506.469959020" observedRunningTime="2025-11-22 07:52:50.426402332 +0000 UTC m=+2507.535856345" watchObservedRunningTime="2025-11-22 07:52:50.435520379 +0000 UTC m=+2507.544974392" Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.454748 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.736552584 podStartE2EDuration="6.454724697s" podCreationTimestamp="2025-11-22 07:52:44 +0000 UTC" firstStartedPulling="2025-11-22 07:52:45.638900229 +0000 UTC m=+2502.748354242" lastFinishedPulling="2025-11-22 07:52:49.357072352 +0000 UTC m=+2506.466526355" observedRunningTime="2025-11-22 07:52:50.444541493 +0000 UTC m=+2507.553995506" watchObservedRunningTime="2025-11-22 07:52:50.454724697 +0000 UTC m=+2507.564178710" Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.728886 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-tqf65" Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.918785 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b8dfm\" (UniqueName: \"kubernetes.io/projected/ebe2ed5d-6475-409a-a3a9-9a47d3de685b-kube-api-access-b8dfm\") pod \"ebe2ed5d-6475-409a-a3a9-9a47d3de685b\" (UID: \"ebe2ed5d-6475-409a-a3a9-9a47d3de685b\") " Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.918940 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ebe2ed5d-6475-409a-a3a9-9a47d3de685b-config\") pod \"ebe2ed5d-6475-409a-a3a9-9a47d3de685b\" (UID: \"ebe2ed5d-6475-409a-a3a9-9a47d3de685b\") " Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.918992 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebe2ed5d-6475-409a-a3a9-9a47d3de685b-combined-ca-bundle\") pod \"ebe2ed5d-6475-409a-a3a9-9a47d3de685b\" (UID: \"ebe2ed5d-6475-409a-a3a9-9a47d3de685b\") " Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.924418 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebe2ed5d-6475-409a-a3a9-9a47d3de685b-kube-api-access-b8dfm" (OuterVolumeSpecName: "kube-api-access-b8dfm") pod "ebe2ed5d-6475-409a-a3a9-9a47d3de685b" (UID: "ebe2ed5d-6475-409a-a3a9-9a47d3de685b"). InnerVolumeSpecName "kube-api-access-b8dfm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.966947 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebe2ed5d-6475-409a-a3a9-9a47d3de685b-config" (OuterVolumeSpecName: "config") pod "ebe2ed5d-6475-409a-a3a9-9a47d3de685b" (UID: "ebe2ed5d-6475-409a-a3a9-9a47d3de685b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:52:50 crc kubenswrapper[4929]: I1122 07:52:50.969398 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ebe2ed5d-6475-409a-a3a9-9a47d3de685b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ebe2ed5d-6475-409a-a3a9-9a47d3de685b" (UID: "ebe2ed5d-6475-409a-a3a9-9a47d3de685b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.022035 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b8dfm\" (UniqueName: \"kubernetes.io/projected/ebe2ed5d-6475-409a-a3a9-9a47d3de685b-kube-api-access-b8dfm\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.022081 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/ebe2ed5d-6475-409a-a3a9-9a47d3de685b-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.022111 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebe2ed5d-6475-409a-a3a9-9a47d3de685b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.198236 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.327824 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-config-data\") pod \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\" (UID: \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\") " Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.327999 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-combined-ca-bundle\") pod \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\" (UID: \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\") " Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.328269 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqg2l\" (UniqueName: \"kubernetes.io/projected/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-kube-api-access-cqg2l\") pod \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\" (UID: \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\") " Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.328547 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-logs\") pod \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\" (UID: \"0b453a1d-e70c-4726-af6c-8af76d9ec6f9\") " Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.329353 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-logs" (OuterVolumeSpecName: "logs") pod "0b453a1d-e70c-4726-af6c-8af76d9ec6f9" (UID: "0b453a1d-e70c-4726-af6c-8af76d9ec6f9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.330680 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.331959 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-kube-api-access-cqg2l" (OuterVolumeSpecName: "kube-api-access-cqg2l") pod "0b453a1d-e70c-4726-af6c-8af76d9ec6f9" (UID: "0b453a1d-e70c-4726-af6c-8af76d9ec6f9"). InnerVolumeSpecName "kube-api-access-cqg2l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.360066 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0b453a1d-e70c-4726-af6c-8af76d9ec6f9" (UID: "0b453a1d-e70c-4726-af6c-8af76d9ec6f9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.371170 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-config-data" (OuterVolumeSpecName: "config-data") pod "0b453a1d-e70c-4726-af6c-8af76d9ec6f9" (UID: "0b453a1d-e70c-4726-af6c-8af76d9ec6f9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.401582 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-tqf65" event={"ID":"ebe2ed5d-6475-409a-a3a9-9a47d3de685b","Type":"ContainerDied","Data":"6dd7dc6aaa9a12206d4536a3dfb2753b9befbc417a7ff5292368d0f50c44e90f"} Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.401615 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-tqf65" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.401639 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6dd7dc6aaa9a12206d4536a3dfb2753b9befbc417a7ff5292368d0f50c44e90f" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.408711 4929 generic.go:334] "Generic (PLEG): container finished" podID="0b453a1d-e70c-4726-af6c-8af76d9ec6f9" containerID="8d0a93a1d63640e94a49d93f890e5a23ff529393c6abb74a2cab615ef1f5c0ae" exitCode=0 Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.408753 4929 generic.go:334] "Generic (PLEG): container finished" podID="0b453a1d-e70c-4726-af6c-8af76d9ec6f9" containerID="056ea8b9912aebc3d0962e98d8106826291c5834a2ed54d4a107ba82e6f19ad6" exitCode=143 Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.408923 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0b453a1d-e70c-4726-af6c-8af76d9ec6f9","Type":"ContainerDied","Data":"8d0a93a1d63640e94a49d93f890e5a23ff529393c6abb74a2cab615ef1f5c0ae"} Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.408983 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0b453a1d-e70c-4726-af6c-8af76d9ec6f9","Type":"ContainerDied","Data":"056ea8b9912aebc3d0962e98d8106826291c5834a2ed54d4a107ba82e6f19ad6"} Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.408999 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0b453a1d-e70c-4726-af6c-8af76d9ec6f9","Type":"ContainerDied","Data":"6f9313b0d89d502af7cd99ec42bdb99627351461ff9e8b4a02d7f05529f1ee13"} Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.409025 4929 scope.go:117] "RemoveContainer" containerID="8d0a93a1d63640e94a49d93f890e5a23ff529393c6abb74a2cab615ef1f5c0ae" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.409444 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.436820 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqg2l\" (UniqueName: \"kubernetes.io/projected/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-kube-api-access-cqg2l\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.437268 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.437344 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b453a1d-e70c-4726-af6c-8af76d9ec6f9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.521692 4929 scope.go:117] "RemoveContainer" containerID="056ea8b9912aebc3d0962e98d8106826291c5834a2ed54d4a107ba82e6f19ad6" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.558654 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.571355 4929 scope.go:117] "RemoveContainer" containerID="8d0a93a1d63640e94a49d93f890e5a23ff529393c6abb74a2cab615ef1f5c0ae" Nov 22 07:52:51 crc kubenswrapper[4929]: E1122 07:52:51.575375 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d0a93a1d63640e94a49d93f890e5a23ff529393c6abb74a2cab615ef1f5c0ae\": container with ID starting with 8d0a93a1d63640e94a49d93f890e5a23ff529393c6abb74a2cab615ef1f5c0ae not found: ID does not exist" containerID="8d0a93a1d63640e94a49d93f890e5a23ff529393c6abb74a2cab615ef1f5c0ae" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.575446 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d0a93a1d63640e94a49d93f890e5a23ff529393c6abb74a2cab615ef1f5c0ae"} err="failed to get container status \"8d0a93a1d63640e94a49d93f890e5a23ff529393c6abb74a2cab615ef1f5c0ae\": rpc error: code = NotFound desc = could not find container \"8d0a93a1d63640e94a49d93f890e5a23ff529393c6abb74a2cab615ef1f5c0ae\": container with ID starting with 8d0a93a1d63640e94a49d93f890e5a23ff529393c6abb74a2cab615ef1f5c0ae not found: ID does not exist" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.575494 4929 scope.go:117] "RemoveContainer" containerID="056ea8b9912aebc3d0962e98d8106826291c5834a2ed54d4a107ba82e6f19ad6" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.579080 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:52:51 crc kubenswrapper[4929]: E1122 07:52:51.580339 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"056ea8b9912aebc3d0962e98d8106826291c5834a2ed54d4a107ba82e6f19ad6\": container with ID starting with 056ea8b9912aebc3d0962e98d8106826291c5834a2ed54d4a107ba82e6f19ad6 not found: ID does not exist" containerID="056ea8b9912aebc3d0962e98d8106826291c5834a2ed54d4a107ba82e6f19ad6" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.580378 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"056ea8b9912aebc3d0962e98d8106826291c5834a2ed54d4a107ba82e6f19ad6"} err="failed to get container status 
\"056ea8b9912aebc3d0962e98d8106826291c5834a2ed54d4a107ba82e6f19ad6\": rpc error: code = NotFound desc = could not find container \"056ea8b9912aebc3d0962e98d8106826291c5834a2ed54d4a107ba82e6f19ad6\": container with ID starting with 056ea8b9912aebc3d0962e98d8106826291c5834a2ed54d4a107ba82e6f19ad6 not found: ID does not exist" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.580410 4929 scope.go:117] "RemoveContainer" containerID="8d0a93a1d63640e94a49d93f890e5a23ff529393c6abb74a2cab615ef1f5c0ae" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.587091 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d0a93a1d63640e94a49d93f890e5a23ff529393c6abb74a2cab615ef1f5c0ae"} err="failed to get container status \"8d0a93a1d63640e94a49d93f890e5a23ff529393c6abb74a2cab615ef1f5c0ae\": rpc error: code = NotFound desc = could not find container \"8d0a93a1d63640e94a49d93f890e5a23ff529393c6abb74a2cab615ef1f5c0ae\": container with ID starting with 8d0a93a1d63640e94a49d93f890e5a23ff529393c6abb74a2cab615ef1f5c0ae not found: ID does not exist" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.587146 4929 scope.go:117] "RemoveContainer" containerID="056ea8b9912aebc3d0962e98d8106826291c5834a2ed54d4a107ba82e6f19ad6" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.590771 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.591001 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"056ea8b9912aebc3d0962e98d8106826291c5834a2ed54d4a107ba82e6f19ad6"} err="failed to get container status \"056ea8b9912aebc3d0962e98d8106826291c5834a2ed54d4a107ba82e6f19ad6\": rpc error: code = NotFound desc = could not find container \"056ea8b9912aebc3d0962e98d8106826291c5834a2ed54d4a107ba82e6f19ad6\": container with ID starting with 056ea8b9912aebc3d0962e98d8106826291c5834a2ed54d4a107ba82e6f19ad6 not found: ID does not exist" Nov 22 07:52:51 crc kubenswrapper[4929]: E1122 07:52:51.591261 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b453a1d-e70c-4726-af6c-8af76d9ec6f9" containerName="nova-metadata-metadata" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.591280 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b453a1d-e70c-4726-af6c-8af76d9ec6f9" containerName="nova-metadata-metadata" Nov 22 07:52:51 crc kubenswrapper[4929]: E1122 07:52:51.591317 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b453a1d-e70c-4726-af6c-8af76d9ec6f9" containerName="nova-metadata-log" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.591326 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b453a1d-e70c-4726-af6c-8af76d9ec6f9" containerName="nova-metadata-log" Nov 22 07:52:51 crc kubenswrapper[4929]: E1122 07:52:51.591356 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebe2ed5d-6475-409a-a3a9-9a47d3de685b" containerName="neutron-db-sync" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.591362 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebe2ed5d-6475-409a-a3a9-9a47d3de685b" containerName="neutron-db-sync" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.591577 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebe2ed5d-6475-409a-a3a9-9a47d3de685b" containerName="neutron-db-sync" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.591602 4929 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="0b453a1d-e70c-4726-af6c-8af76d9ec6f9" containerName="nova-metadata-log" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.591623 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b453a1d-e70c-4726-af6c-8af76d9ec6f9" containerName="nova-metadata-metadata" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.713400 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.713444 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78d445889f-78pqd"] Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.713570 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.714861 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-78d445889f-78pqd" podUID="bd83a57e-98a0-42fe-9e71-0f9ecde27095" containerName="dnsmasq-dns" containerID="cri-o://886f8c312bd53eecbb4a09e15aff8f6e7ae588655dc6f74e213a0045d8ced470" gracePeriod=10 Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.718255 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.718626 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.718872 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.741025 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-g62kx"] Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.751513 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.760033 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-g62kx"] Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.762475 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/08cb740a-2cca-4763-bcce-82e75807b0df-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " pod="openstack/nova-metadata-0" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.762528 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08cb740a-2cca-4763-bcce-82e75807b0df-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " pod="openstack/nova-metadata-0" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.762647 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-g62kx\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.762716 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08cb740a-2cca-4763-bcce-82e75807b0df-logs\") pod \"nova-metadata-0\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " pod="openstack/nova-metadata-0" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.762755 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqnck\" (UniqueName: \"kubernetes.io/projected/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-kube-api-access-rqnck\") pod \"dnsmasq-dns-757b4f8459-g62kx\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.762784 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-g62kx\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.762802 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-g62kx\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.762840 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkc6x\" (UniqueName: \"kubernetes.io/projected/08cb740a-2cca-4763-bcce-82e75807b0df-kube-api-access-lkc6x\") pod \"nova-metadata-0\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " pod="openstack/nova-metadata-0" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.762859 4929 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-dns-svc\") pod \"dnsmasq-dns-757b4f8459-g62kx\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.762908 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08cb740a-2cca-4763-bcce-82e75807b0df-config-data\") pod \"nova-metadata-0\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " pod="openstack/nova-metadata-0" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.763006 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-config\") pod \"dnsmasq-dns-757b4f8459-g62kx\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.781480 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-8677c466cb-74g99"] Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.799696 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.803441 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-gggtz" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.803711 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.803911 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.805588 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.866113 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8677c466cb-74g99"] Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.867288 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-combined-ca-bundle\") pod \"neutron-8677c466cb-74g99\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.867326 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08cb740a-2cca-4763-bcce-82e75807b0df-config-data\") pod \"nova-metadata-0\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " pod="openstack/nova-metadata-0" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.867383 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-config\") pod \"dnsmasq-dns-757b4f8459-g62kx\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.867403 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pmk47\" 
(UniqueName: \"kubernetes.io/projected/864b28ed-cf71-451f-8af0-61d616497ee7-kube-api-access-pmk47\") pod \"neutron-8677c466cb-74g99\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.867428 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-ovndb-tls-certs\") pod \"neutron-8677c466cb-74g99\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.867448 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/08cb740a-2cca-4763-bcce-82e75807b0df-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " pod="openstack/nova-metadata-0" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.867480 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08cb740a-2cca-4763-bcce-82e75807b0df-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " pod="openstack/nova-metadata-0" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.867501 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-config\") pod \"neutron-8677c466cb-74g99\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.867544 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-g62kx\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.867568 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08cb740a-2cca-4763-bcce-82e75807b0df-logs\") pod \"nova-metadata-0\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " pod="openstack/nova-metadata-0" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.867594 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqnck\" (UniqueName: \"kubernetes.io/projected/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-kube-api-access-rqnck\") pod \"dnsmasq-dns-757b4f8459-g62kx\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.867658 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-g62kx\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.867683 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-ovsdbserver-sb\") pod 
\"dnsmasq-dns-757b4f8459-g62kx\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.867731 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkc6x\" (UniqueName: \"kubernetes.io/projected/08cb740a-2cca-4763-bcce-82e75807b0df-kube-api-access-lkc6x\") pod \"nova-metadata-0\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " pod="openstack/nova-metadata-0" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.867749 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-dns-svc\") pod \"dnsmasq-dns-757b4f8459-g62kx\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.867775 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-httpd-config\") pod \"neutron-8677c466cb-74g99\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.869328 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-g62kx\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.869833 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-config\") pod \"dnsmasq-dns-757b4f8459-g62kx\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.875109 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-g62kx\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.875435 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08cb740a-2cca-4763-bcce-82e75807b0df-logs\") pod \"nova-metadata-0\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " pod="openstack/nova-metadata-0" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.876420 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-g62kx\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.892127 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-dns-svc\") pod \"dnsmasq-dns-757b4f8459-g62kx\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 
07:52:51.895971 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08cb740a-2cca-4763-bcce-82e75807b0df-config-data\") pod \"nova-metadata-0\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " pod="openstack/nova-metadata-0" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.896405 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/08cb740a-2cca-4763-bcce-82e75807b0df-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " pod="openstack/nova-metadata-0" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.912944 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08cb740a-2cca-4763-bcce-82e75807b0df-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " pod="openstack/nova-metadata-0" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.923052 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkc6x\" (UniqueName: \"kubernetes.io/projected/08cb740a-2cca-4763-bcce-82e75807b0df-kube-api-access-lkc6x\") pod \"nova-metadata-0\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " pod="openstack/nova-metadata-0" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.923819 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqnck\" (UniqueName: \"kubernetes.io/projected/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-kube-api-access-rqnck\") pod \"dnsmasq-dns-757b4f8459-g62kx\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.974572 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-httpd-config\") pod \"neutron-8677c466cb-74g99\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.974654 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-combined-ca-bundle\") pod \"neutron-8677c466cb-74g99\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.974729 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pmk47\" (UniqueName: \"kubernetes.io/projected/864b28ed-cf71-451f-8af0-61d616497ee7-kube-api-access-pmk47\") pod \"neutron-8677c466cb-74g99\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.974767 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-ovndb-tls-certs\") pod \"neutron-8677c466cb-74g99\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:52:51 crc kubenswrapper[4929]: I1122 07:52:51.974820 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-config\") pod \"neutron-8677c466cb-74g99\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:52:52 crc kubenswrapper[4929]: I1122 07:52:52.002469 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-config\") pod \"neutron-8677c466cb-74g99\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:52:52 crc kubenswrapper[4929]: I1122 07:52:52.004947 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-ovndb-tls-certs\") pod \"neutron-8677c466cb-74g99\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:52:52 crc kubenswrapper[4929]: I1122 07:52:52.005605 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-combined-ca-bundle\") pod \"neutron-8677c466cb-74g99\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:52:52 crc kubenswrapper[4929]: I1122 07:52:52.007528 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-httpd-config\") pod \"neutron-8677c466cb-74g99\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:52:52 crc kubenswrapper[4929]: I1122 07:52:52.007805 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b453a1d-e70c-4726-af6c-8af76d9ec6f9" path="/var/lib/kubelet/pods/0b453a1d-e70c-4726-af6c-8af76d9ec6f9/volumes" Nov 22 07:52:52 crc kubenswrapper[4929]: I1122 07:52:52.022569 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pmk47\" (UniqueName: \"kubernetes.io/projected/864b28ed-cf71-451f-8af0-61d616497ee7-kube-api-access-pmk47\") pod \"neutron-8677c466cb-74g99\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:52:52 crc kubenswrapper[4929]: I1122 07:52:52.027203 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:52:52 crc kubenswrapper[4929]: I1122 07:52:52.057006 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 07:52:52 crc kubenswrapper[4929]: I1122 07:52:52.081113 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:52:52 crc kubenswrapper[4929]: I1122 07:52:52.444393 4929 generic.go:334] "Generic (PLEG): container finished" podID="bd83a57e-98a0-42fe-9e71-0f9ecde27095" containerID="886f8c312bd53eecbb4a09e15aff8f6e7ae588655dc6f74e213a0045d8ced470" exitCode=0 Nov 22 07:52:52 crc kubenswrapper[4929]: I1122 07:52:52.444472 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78d445889f-78pqd" event={"ID":"bd83a57e-98a0-42fe-9e71-0f9ecde27095","Type":"ContainerDied","Data":"886f8c312bd53eecbb4a09e15aff8f6e7ae588655dc6f74e213a0045d8ced470"} Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.011052 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.021116 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-g62kx"] Nov 22 07:52:53 crc kubenswrapper[4929]: W1122 07:52:53.029378 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d531c11_4aa9_4fe9_ba25_ef5b3a28438d.slice/crio-6519d63b6100543724625a8604aaace21f5443e57ef85b750c3367284888e5b4 WatchSource:0}: Error finding container 6519d63b6100543724625a8604aaace21f5443e57ef85b750c3367284888e5b4: Status 404 returned error can't find the container with id 6519d63b6100543724625a8604aaace21f5443e57ef85b750c3367284888e5b4 Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.133031 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-8677c466cb-74g99"] Nov 22 07:52:53 crc kubenswrapper[4929]: W1122 07:52:53.144347 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod864b28ed_cf71_451f_8af0_61d616497ee7.slice/crio-abd8e5f160c887e05426580237bfcbbdee0b52e5651f1d42f7a1e73edab8f2a0 WatchSource:0}: Error finding container abd8e5f160c887e05426580237bfcbbdee0b52e5651f1d42f7a1e73edab8f2a0: Status 404 returned error can't find the container with id abd8e5f160c887e05426580237bfcbbdee0b52e5651f1d42f7a1e73edab8f2a0 Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.183763 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.220830 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-ovsdbserver-sb\") pod \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.220982 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-dns-swift-storage-0\") pod \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.221006 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-config\") pod \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.221083 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-ovsdbserver-nb\") pod \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.221114 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4js85\" (UniqueName: \"kubernetes.io/projected/bd83a57e-98a0-42fe-9e71-0f9ecde27095-kube-api-access-4js85\") pod \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.221159 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-dns-svc\") pod \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\" (UID: \"bd83a57e-98a0-42fe-9e71-0f9ecde27095\") " Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.230353 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd83a57e-98a0-42fe-9e71-0f9ecde27095-kube-api-access-4js85" (OuterVolumeSpecName: "kube-api-access-4js85") pod "bd83a57e-98a0-42fe-9e71-0f9ecde27095" (UID: "bd83a57e-98a0-42fe-9e71-0f9ecde27095"). InnerVolumeSpecName "kube-api-access-4js85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.323584 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4js85\" (UniqueName: \"kubernetes.io/projected/bd83a57e-98a0-42fe-9e71-0f9ecde27095-kube-api-access-4js85\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.433757 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bd83a57e-98a0-42fe-9e71-0f9ecde27095" (UID: "bd83a57e-98a0-42fe-9e71-0f9ecde27095"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.469870 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bd83a57e-98a0-42fe-9e71-0f9ecde27095" (UID: "bd83a57e-98a0-42fe-9e71-0f9ecde27095"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.501127 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-config" (OuterVolumeSpecName: "config") pod "bd83a57e-98a0-42fe-9e71-0f9ecde27095" (UID: "bd83a57e-98a0-42fe-9e71-0f9ecde27095"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.504708 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78d445889f-78pqd" Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.504930 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "bd83a57e-98a0-42fe-9e71-0f9ecde27095" (UID: "bd83a57e-98a0-42fe-9e71-0f9ecde27095"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.504980 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78d445889f-78pqd" event={"ID":"bd83a57e-98a0-42fe-9e71-0f9ecde27095","Type":"ContainerDied","Data":"2e6b80ca131422e907bdc9e2ce1e688f77fbafe024e072cd5f3b5413825c24af"} Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.505022 4929 scope.go:117] "RemoveContainer" containerID="886f8c312bd53eecbb4a09e15aff8f6e7ae588655dc6f74e213a0045d8ced470" Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.514320 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-g62kx" event={"ID":"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d","Type":"ContainerStarted","Data":"f3cffe924fffabfe365fe6354af606a9603b1b7b1a927fe6241b97c47a56810b"} Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.514576 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-g62kx" event={"ID":"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d","Type":"ContainerStarted","Data":"6519d63b6100543724625a8604aaace21f5443e57ef85b750c3367284888e5b4"} Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.516498 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bd83a57e-98a0-42fe-9e71-0f9ecde27095" (UID: "bd83a57e-98a0-42fe-9e71-0f9ecde27095"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.525314 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"08cb740a-2cca-4763-bcce-82e75807b0df","Type":"ContainerStarted","Data":"1866716cb3ea98aed3f7b065fc90653cb0e8804953f0257d4393f9fe41328ea7"} Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.529945 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.529982 4929 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.529997 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.530011 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.530021 4929 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd83a57e-98a0-42fe-9e71-0f9ecde27095-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.531736 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8677c466cb-74g99" event={"ID":"864b28ed-cf71-451f-8af0-61d616497ee7","Type":"ContainerStarted","Data":"abd8e5f160c887e05426580237bfcbbdee0b52e5651f1d42f7a1e73edab8f2a0"} Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.806695 4929 scope.go:117] "RemoveContainer" containerID="5be8ccc49deb29bbf54fe7e8af8bb2f04560ab22fd48a75ab2123d404f9b950b" Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.911725 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78d445889f-78pqd"] Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.920288 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78d445889f-78pqd"] Nov 22 07:52:53 crc kubenswrapper[4929]: I1122 07:52:53.961319 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd83a57e-98a0-42fe-9e71-0f9ecde27095" path="/var/lib/kubelet/pods/bd83a57e-98a0-42fe-9e71-0f9ecde27095/volumes" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.543787 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8677c466cb-74g99" event={"ID":"864b28ed-cf71-451f-8af0-61d616497ee7","Type":"ContainerStarted","Data":"369b9e5a502dad2f024489718d4987ad29e9703768bee8f75fff35b638d9493a"} Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.543830 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8677c466cb-74g99" event={"ID":"864b28ed-cf71-451f-8af0-61d616497ee7","Type":"ContainerStarted","Data":"1254bb0deb6c2be230ff9eadab2ede58d0d2cce8ebf813d10e64d1a0aa3115f4"} Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.543930 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-8677c466cb-74g99" Nov 22 
07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.548041 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-g62kx" event={"ID":"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d","Type":"ContainerDied","Data":"f3cffe924fffabfe365fe6354af606a9603b1b7b1a927fe6241b97c47a56810b"} Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.548548 4929 generic.go:334] "Generic (PLEG): container finished" podID="3d531c11-4aa9-4fe9-ba25-ef5b3a28438d" containerID="f3cffe924fffabfe365fe6354af606a9603b1b7b1a927fe6241b97c47a56810b" exitCode=0 Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.551057 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"08cb740a-2cca-4763-bcce-82e75807b0df","Type":"ContainerStarted","Data":"33ad8f45eca966b4bc444e20169f0f8954bea41087c6106825a8ead36908dea4"} Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.551301 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"08cb740a-2cca-4763-bcce-82e75807b0df","Type":"ContainerStarted","Data":"93ea7a0a713835bae8765a6971ad9faf9ecf9ee5df41ef104401204539341def"} Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.569338 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-8677c466cb-74g99" podStartSLOduration=3.569314724 podStartE2EDuration="3.569314724s" podCreationTimestamp="2025-11-22 07:52:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:52:54.565167331 +0000 UTC m=+2511.674621344" watchObservedRunningTime="2025-11-22 07:52:54.569314724 +0000 UTC m=+2511.678768737" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.601080 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.601025033 podStartE2EDuration="3.601025033s" podCreationTimestamp="2025-11-22 07:52:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:52:54.588413419 +0000 UTC m=+2511.697867432" watchObservedRunningTime="2025-11-22 07:52:54.601025033 +0000 UTC m=+2511.710479046" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.653812 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.655415 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.675072 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.675121 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.691309 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.829333 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6d5d9c9567-b7l75"] Nov 22 07:52:54 crc kubenswrapper[4929]: E1122 07:52:54.829715 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd83a57e-98a0-42fe-9e71-0f9ecde27095" containerName="dnsmasq-dns" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.829731 4929 
state_mem.go:107] "Deleted CPUSet assignment" podUID="bd83a57e-98a0-42fe-9e71-0f9ecde27095" containerName="dnsmasq-dns" Nov 22 07:52:54 crc kubenswrapper[4929]: E1122 07:52:54.829774 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd83a57e-98a0-42fe-9e71-0f9ecde27095" containerName="init" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.829781 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd83a57e-98a0-42fe-9e71-0f9ecde27095" containerName="init" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.829984 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd83a57e-98a0-42fe-9e71-0f9ecde27095" containerName="dnsmasq-dns" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.831127 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6d5d9c9567-b7l75" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.832913 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.833129 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.861242 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6d5d9c9567-b7l75"] Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.883735 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.964874 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2c46869-321d-4fb4-bc99-a9f61f36a236-ovndb-tls-certs\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.964948 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2c46869-321d-4fb4-bc99-a9f61f36a236-internal-tls-certs\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.964972 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b2c46869-321d-4fb4-bc99-a9f61f36a236-httpd-config\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.965155 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjzpm\" (UniqueName: \"kubernetes.io/projected/b2c46869-321d-4fb4-bc99-a9f61f36a236-kube-api-access-pjzpm\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75" Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.965198 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b2c46869-321d-4fb4-bc99-a9f61f36a236-config\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75" Nov 22 07:52:54 
Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.965417 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2c46869-321d-4fb4-bc99-a9f61f36a236-public-tls-certs\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75"
Nov 22 07:52:54 crc kubenswrapper[4929]: I1122 07:52:54.965622 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2c46869-321d-4fb4-bc99-a9f61f36a236-combined-ca-bundle\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.068185 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjzpm\" (UniqueName: \"kubernetes.io/projected/b2c46869-321d-4fb4-bc99-a9f61f36a236-kube-api-access-pjzpm\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.068265 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b2c46869-321d-4fb4-bc99-a9f61f36a236-config\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.068337 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2c46869-321d-4fb4-bc99-a9f61f36a236-public-tls-certs\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.068421 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2c46869-321d-4fb4-bc99-a9f61f36a236-combined-ca-bundle\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.068487 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2c46869-321d-4fb4-bc99-a9f61f36a236-ovndb-tls-certs\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.068551 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2c46869-321d-4fb4-bc99-a9f61f36a236-internal-tls-certs\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.068576 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b2c46869-321d-4fb4-bc99-a9f61f36a236-httpd-config\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.073035 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2c46869-321d-4fb4-bc99-a9f61f36a236-public-tls-certs\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.073944 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2c46869-321d-4fb4-bc99-a9f61f36a236-ovndb-tls-certs\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.074076 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2c46869-321d-4fb4-bc99-a9f61f36a236-combined-ca-bundle\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.077184 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b2c46869-321d-4fb4-bc99-a9f61f36a236-httpd-config\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.077246 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b2c46869-321d-4fb4-bc99-a9f61f36a236-internal-tls-certs\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.077263 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/b2c46869-321d-4fb4-bc99-a9f61f36a236-config\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.092922 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjzpm\" (UniqueName: \"kubernetes.io/projected/b2c46869-321d-4fb4-bc99-a9f61f36a236-kube-api-access-pjzpm\") pod \"neutron-6d5d9c9567-b7l75\" (UID: \"b2c46869-321d-4fb4-bc99-a9f61f36a236\") " pod="openstack/neutron-6d5d9c9567-b7l75"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.149539 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6d5d9c9567-b7l75"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.597089 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-g62kx" event={"ID":"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d","Type":"ContainerStarted","Data":"7f0d6d3aca35764a9056552a0452ad8f81a227ff90bfdf6842ca1354b2691ef8"}
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.599656 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-757b4f8459-g62kx"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.636703 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-757b4f8459-g62kx" podStartSLOduration=4.636675164 podStartE2EDuration="4.636675164s" podCreationTimestamp="2025-11-22 07:52:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:52:55.62163736 +0000 UTC m=+2512.731091373" watchObservedRunningTime="2025-11-22 07:52:55.636675164 +0000 UTC m=+2512.746129187"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.644394 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.757448 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="06a554db-ef62-42bb-955a-a0e69dbc575d" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.216:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.757486 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="06a554db-ef62-42bb-955a-a0e69dbc575d" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.216:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 22 07:52:55 crc kubenswrapper[4929]: I1122 07:52:55.792476 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6d5d9c9567-b7l75"]
Nov 22 07:52:56 crc kubenswrapper[4929]: I1122 07:52:56.606219 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6d5d9c9567-b7l75" event={"ID":"b2c46869-321d-4fb4-bc99-a9f61f36a236","Type":"ContainerStarted","Data":"8fb93c75ad13b57a8f5b219dea825abe71d06fb4e1164bb2a65cfc7fe286810a"}
Nov 22 07:52:56 crc kubenswrapper[4929]: I1122 07:52:56.606464 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6d5d9c9567-b7l75" event={"ID":"b2c46869-321d-4fb4-bc99-a9f61f36a236","Type":"ContainerStarted","Data":"07852c33fa82da17aa10432c70019fce522d7affcbdba837ec34a575c66e2b64"}
Nov 22 07:52:56 crc kubenswrapper[4929]: I1122 07:52:56.608463 4929 generic.go:334] "Generic (PLEG): container finished" podID="6d2e4e61-cf07-4acf-9ecf-e460a90b13aa" containerID="f740af74253fae41d9c7439cd2325c66c7cca6c486a28435a60a8232a2090e73" exitCode=0
Nov 22 07:52:56 crc kubenswrapper[4929]: I1122 07:52:56.608530 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-bs8nq" event={"ID":"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa","Type":"ContainerDied","Data":"f740af74253fae41d9c7439cd2325c66c7cca6c486a28435a60a8232a2090e73"}
Nov 22 07:52:57 crc kubenswrapper[4929]: I1122 07:52:57.058468 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 22 07:52:57 crc kubenswrapper[4929]: I1122 07:52:57.062183 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.083114 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-bs8nq"
Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.129866 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-md7dp\" (UniqueName: \"kubernetes.io/projected/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-kube-api-access-md7dp\") pod \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\" (UID: \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\") "
Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.129924 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-scripts\") pod \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\" (UID: \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\") "
Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.129995 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-config-data\") pod \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\" (UID: \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\") "
Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.130054 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-combined-ca-bundle\") pod \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\" (UID: \"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa\") "
Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.135459 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-kube-api-access-md7dp" (OuterVolumeSpecName: "kube-api-access-md7dp") pod "6d2e4e61-cf07-4acf-9ecf-e460a90b13aa" (UID: "6d2e4e61-cf07-4acf-9ecf-e460a90b13aa"). InnerVolumeSpecName "kube-api-access-md7dp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.141101 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-scripts" (OuterVolumeSpecName: "scripts") pod "6d2e4e61-cf07-4acf-9ecf-e460a90b13aa" (UID: "6d2e4e61-cf07-4acf-9ecf-e460a90b13aa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.161142 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6d2e4e61-cf07-4acf-9ecf-e460a90b13aa" (UID: "6d2e4e61-cf07-4acf-9ecf-e460a90b13aa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.163320 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-config-data" (OuterVolumeSpecName: "config-data") pod "6d2e4e61-cf07-4acf-9ecf-e460a90b13aa" (UID: "6d2e4e61-cf07-4acf-9ecf-e460a90b13aa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.232517 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-md7dp\" (UniqueName: \"kubernetes.io/projected/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-kube-api-access-md7dp\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.232550 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.232559 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.232568 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.627642 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6d5d9c9567-b7l75" event={"ID":"b2c46869-321d-4fb4-bc99-a9f61f36a236","Type":"ContainerStarted","Data":"c3bf20cc4c5cb108832e46f6e43a1c87c8cdce38e6203972def5ef8ea4bc459f"} Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.628961 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-6d5d9c9567-b7l75" Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.630279 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-bs8nq" event={"ID":"6d2e4e61-cf07-4acf-9ecf-e460a90b13aa","Type":"ContainerDied","Data":"590513f2d30dc6af9f15385d80edef79058389347f0e78c17ce49821831effad"} Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.630306 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="590513f2d30dc6af9f15385d80edef79058389347f0e78c17ce49821831effad" Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.630346 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-bs8nq" Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.669647 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6d5d9c9567-b7l75" podStartSLOduration=4.669588215 podStartE2EDuration="4.669588215s" podCreationTimestamp="2025-11-22 07:52:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:52:58.658679484 +0000 UTC m=+2515.768133497" watchObservedRunningTime="2025-11-22 07:52:58.669588215 +0000 UTC m=+2515.779042228" Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.809316 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.809978 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="06a554db-ef62-42bb-955a-a0e69dbc575d" containerName="nova-api-log" containerID="cri-o://31f8bc34e200d4b678359380d3aa9d758b1a4d18eab016df9b922a7be5e500cc" gracePeriod=30 Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.810089 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="06a554db-ef62-42bb-955a-a0e69dbc575d" containerName="nova-api-api" containerID="cri-o://bccb7b314a22afc8eb3ba6d55bab66fa969a59a322c8bf7de2c054058730179a" gracePeriod=30 Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.819593 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.819826 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="303b60e5-e78f-4519-a766-069904f135a8" containerName="nova-scheduler-scheduler" containerID="cri-o://6020c730d65286b179e61c765d04cc5abad6d08ce7fdd4225a077646b32a1125" gracePeriod=30 Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.829167 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.829414 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="08cb740a-2cca-4763-bcce-82e75807b0df" containerName="nova-metadata-log" containerID="cri-o://33ad8f45eca966b4bc444e20169f0f8954bea41087c6106825a8ead36908dea4" gracePeriod=30 Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.829532 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="08cb740a-2cca-4763-bcce-82e75807b0df" containerName="nova-metadata-metadata" containerID="cri-o://93ea7a0a713835bae8765a6971ad9faf9ecf9ee5df41ef104401204539341def" gracePeriod=30 Nov 22 07:52:58 crc kubenswrapper[4929]: I1122 07:52:58.947519 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" Nov 22 07:52:58 crc kubenswrapper[4929]: E1122 07:52:58.947969 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:52:59 crc kubenswrapper[4929]: 
I1122 07:52:59.641412 4929 generic.go:334] "Generic (PLEG): container finished" podID="08cb740a-2cca-4763-bcce-82e75807b0df" containerID="93ea7a0a713835bae8765a6971ad9faf9ecf9ee5df41ef104401204539341def" exitCode=0 Nov 22 07:52:59 crc kubenswrapper[4929]: I1122 07:52:59.641439 4929 generic.go:334] "Generic (PLEG): container finished" podID="08cb740a-2cca-4763-bcce-82e75807b0df" containerID="33ad8f45eca966b4bc444e20169f0f8954bea41087c6106825a8ead36908dea4" exitCode=143 Nov 22 07:52:59 crc kubenswrapper[4929]: I1122 07:52:59.641463 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"08cb740a-2cca-4763-bcce-82e75807b0df","Type":"ContainerDied","Data":"93ea7a0a713835bae8765a6971ad9faf9ecf9ee5df41ef104401204539341def"} Nov 22 07:52:59 crc kubenswrapper[4929]: I1122 07:52:59.641515 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"08cb740a-2cca-4763-bcce-82e75807b0df","Type":"ContainerDied","Data":"33ad8f45eca966b4bc444e20169f0f8954bea41087c6106825a8ead36908dea4"} Nov 22 07:52:59 crc kubenswrapper[4929]: I1122 07:52:59.643046 4929 generic.go:334] "Generic (PLEG): container finished" podID="06a554db-ef62-42bb-955a-a0e69dbc575d" containerID="31f8bc34e200d4b678359380d3aa9d758b1a4d18eab016df9b922a7be5e500cc" exitCode=143 Nov 22 07:52:59 crc kubenswrapper[4929]: I1122 07:52:59.643127 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"06a554db-ef62-42bb-955a-a0e69dbc575d","Type":"ContainerDied","Data":"31f8bc34e200d4b678359380d3aa9d758b1a4d18eab016df9b922a7be5e500cc"} Nov 22 07:52:59 crc kubenswrapper[4929]: E1122 07:52:59.657252 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6020c730d65286b179e61c765d04cc5abad6d08ce7fdd4225a077646b32a1125" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 22 07:52:59 crc kubenswrapper[4929]: E1122 07:52:59.666760 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6020c730d65286b179e61c765d04cc5abad6d08ce7fdd4225a077646b32a1125" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 22 07:52:59 crc kubenswrapper[4929]: E1122 07:52:59.673619 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="6020c730d65286b179e61c765d04cc5abad6d08ce7fdd4225a077646b32a1125" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 22 07:52:59 crc kubenswrapper[4929]: E1122 07:52:59.673706 4929 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="303b60e5-e78f-4519-a766-069904f135a8" containerName="nova-scheduler-scheduler" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.314251 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.398737 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08cb740a-2cca-4763-bcce-82e75807b0df-config-data\") pod \"08cb740a-2cca-4763-bcce-82e75807b0df\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.398881 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08cb740a-2cca-4763-bcce-82e75807b0df-combined-ca-bundle\") pod \"08cb740a-2cca-4763-bcce-82e75807b0df\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.398962 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08cb740a-2cca-4763-bcce-82e75807b0df-logs\") pod \"08cb740a-2cca-4763-bcce-82e75807b0df\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.399026 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/08cb740a-2cca-4763-bcce-82e75807b0df-nova-metadata-tls-certs\") pod \"08cb740a-2cca-4763-bcce-82e75807b0df\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.399078 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkc6x\" (UniqueName: \"kubernetes.io/projected/08cb740a-2cca-4763-bcce-82e75807b0df-kube-api-access-lkc6x\") pod \"08cb740a-2cca-4763-bcce-82e75807b0df\" (UID: \"08cb740a-2cca-4763-bcce-82e75807b0df\") " Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.399373 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08cb740a-2cca-4763-bcce-82e75807b0df-logs" (OuterVolumeSpecName: "logs") pod "08cb740a-2cca-4763-bcce-82e75807b0df" (UID: "08cb740a-2cca-4763-bcce-82e75807b0df"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.399740 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/08cb740a-2cca-4763-bcce-82e75807b0df-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.405352 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08cb740a-2cca-4763-bcce-82e75807b0df-kube-api-access-lkc6x" (OuterVolumeSpecName: "kube-api-access-lkc6x") pod "08cb740a-2cca-4763-bcce-82e75807b0df" (UID: "08cb740a-2cca-4763-bcce-82e75807b0df"). InnerVolumeSpecName "kube-api-access-lkc6x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.434389 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08cb740a-2cca-4763-bcce-82e75807b0df-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "08cb740a-2cca-4763-bcce-82e75807b0df" (UID: "08cb740a-2cca-4763-bcce-82e75807b0df"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.436203 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08cb740a-2cca-4763-bcce-82e75807b0df-config-data" (OuterVolumeSpecName: "config-data") pod "08cb740a-2cca-4763-bcce-82e75807b0df" (UID: "08cb740a-2cca-4763-bcce-82e75807b0df"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.464418 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08cb740a-2cca-4763-bcce-82e75807b0df-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "08cb740a-2cca-4763-bcce-82e75807b0df" (UID: "08cb740a-2cca-4763-bcce-82e75807b0df"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.502376 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lkc6x\" (UniqueName: \"kubernetes.io/projected/08cb740a-2cca-4763-bcce-82e75807b0df-kube-api-access-lkc6x\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.502426 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/08cb740a-2cca-4763-bcce-82e75807b0df-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.502446 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08cb740a-2cca-4763-bcce-82e75807b0df-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.502463 4929 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/08cb740a-2cca-4763-bcce-82e75807b0df-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.666534 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"08cb740a-2cca-4763-bcce-82e75807b0df","Type":"ContainerDied","Data":"1866716cb3ea98aed3f7b065fc90653cb0e8804953f0257d4393f9fe41328ea7"} Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.666608 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.666623 4929 scope.go:117] "RemoveContainer" containerID="93ea7a0a713835bae8765a6971ad9faf9ecf9ee5df41ef104401204539341def" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.704152 4929 scope.go:117] "RemoveContainer" containerID="33ad8f45eca966b4bc444e20169f0f8954bea41087c6106825a8ead36908dea4" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.720078 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.731479 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.749533 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:53:01 crc kubenswrapper[4929]: E1122 07:53:01.750051 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d2e4e61-cf07-4acf-9ecf-e460a90b13aa" containerName="nova-manage" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.750072 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d2e4e61-cf07-4acf-9ecf-e460a90b13aa" containerName="nova-manage" Nov 22 07:53:01 crc kubenswrapper[4929]: E1122 07:53:01.750114 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08cb740a-2cca-4763-bcce-82e75807b0df" containerName="nova-metadata-metadata" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.750124 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="08cb740a-2cca-4763-bcce-82e75807b0df" containerName="nova-metadata-metadata" Nov 22 07:53:01 crc kubenswrapper[4929]: E1122 07:53:01.750145 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08cb740a-2cca-4763-bcce-82e75807b0df" containerName="nova-metadata-log" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.750155 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="08cb740a-2cca-4763-bcce-82e75807b0df" containerName="nova-metadata-log" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.750389 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="08cb740a-2cca-4763-bcce-82e75807b0df" containerName="nova-metadata-metadata" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.750414 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="08cb740a-2cca-4763-bcce-82e75807b0df" containerName="nova-metadata-log" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.750439 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d2e4e61-cf07-4acf-9ecf-e460a90b13aa" containerName="nova-manage" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.758422 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.760370 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.761617 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.762337 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.910201 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " pod="openstack/nova-metadata-0" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.911736 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-config-data\") pod \"nova-metadata-0\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " pod="openstack/nova-metadata-0" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.911840 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " pod="openstack/nova-metadata-0" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.911942 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jq2qp\" (UniqueName: \"kubernetes.io/projected/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-kube-api-access-jq2qp\") pod \"nova-metadata-0\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " pod="openstack/nova-metadata-0" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.911970 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-logs\") pod \"nova-metadata-0\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " pod="openstack/nova-metadata-0" Nov 22 07:53:01 crc kubenswrapper[4929]: I1122 07:53:01.959768 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08cb740a-2cca-4763-bcce-82e75807b0df" path="/var/lib/kubelet/pods/08cb740a-2cca-4763-bcce-82e75807b0df/volumes" Nov 22 07:53:02 crc kubenswrapper[4929]: I1122 07:53:02.014962 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " pod="openstack/nova-metadata-0" Nov 22 07:53:02 crc kubenswrapper[4929]: I1122 07:53:02.015138 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-config-data\") pod \"nova-metadata-0\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " pod="openstack/nova-metadata-0" Nov 22 07:53:02 crc kubenswrapper[4929]: I1122 07:53:02.015322 4929 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " pod="openstack/nova-metadata-0" Nov 22 07:53:02 crc kubenswrapper[4929]: I1122 07:53:02.015433 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jq2qp\" (UniqueName: \"kubernetes.io/projected/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-kube-api-access-jq2qp\") pod \"nova-metadata-0\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " pod="openstack/nova-metadata-0" Nov 22 07:53:02 crc kubenswrapper[4929]: I1122 07:53:02.015479 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-logs\") pod \"nova-metadata-0\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " pod="openstack/nova-metadata-0" Nov 22 07:53:02 crc kubenswrapper[4929]: I1122 07:53:02.016248 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-logs\") pod \"nova-metadata-0\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " pod="openstack/nova-metadata-0" Nov 22 07:53:02 crc kubenswrapper[4929]: I1122 07:53:02.022556 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " pod="openstack/nova-metadata-0" Nov 22 07:53:02 crc kubenswrapper[4929]: I1122 07:53:02.022568 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " pod="openstack/nova-metadata-0" Nov 22 07:53:02 crc kubenswrapper[4929]: I1122 07:53:02.023723 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-config-data\") pod \"nova-metadata-0\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " pod="openstack/nova-metadata-0" Nov 22 07:53:02 crc kubenswrapper[4929]: I1122 07:53:02.035847 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jq2qp\" (UniqueName: \"kubernetes.io/projected/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-kube-api-access-jq2qp\") pod \"nova-metadata-0\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " pod="openstack/nova-metadata-0" Nov 22 07:53:02 crc kubenswrapper[4929]: I1122 07:53:02.085248 4929 util.go:30] "No sandbox for pod can be found. 
Nov 22 07:53:02 crc kubenswrapper[4929]: I1122 07:53:02.088362 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-757b4f8459-g62kx"
Nov 22 07:53:02 crc kubenswrapper[4929]: I1122 07:53:02.174169 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-795f4db4bc-jr28r"]
Nov 22 07:53:02 crc kubenswrapper[4929]: I1122 07:53:02.177422 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-795f4db4bc-jr28r" podUID="08943094-b174-4891-80da-386b43fa40c6" containerName="dnsmasq-dns" containerID="cri-o://6e0180514f75f36d512ef82c11723603a0b065dfa813eaedc7ccc5f2491902df" gracePeriod=10
Nov 22 07:53:02 crc kubenswrapper[4929]: I1122 07:53:02.684172 4929 generic.go:334] "Generic (PLEG): container finished" podID="08943094-b174-4891-80da-386b43fa40c6" containerID="6e0180514f75f36d512ef82c11723603a0b065dfa813eaedc7ccc5f2491902df" exitCode=0
Nov 22 07:53:02 crc kubenswrapper[4929]: I1122 07:53:02.684607 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-795f4db4bc-jr28r" event={"ID":"08943094-b174-4891-80da-386b43fa40c6","Type":"ContainerDied","Data":"6e0180514f75f36d512ef82c11723603a0b065dfa813eaedc7ccc5f2491902df"}
Nov 22 07:53:02 crc kubenswrapper[4929]: I1122 07:53:02.690582 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.703580 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8fbd3c1f-519a-4c56-baf7-0c46b93453b3","Type":"ContainerStarted","Data":"7efa7c7b0c32cc634c5f39f8aeccf28f36fcc38ae3df7e44b9a527dbfa8c0e4b"}
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.704109 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8fbd3c1f-519a-4c56-baf7-0c46b93453b3","Type":"ContainerStarted","Data":"65a85179b924d328591c49476b6883e41a73fd42ea308f28ae515c4b770f0298"}
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.708635 4929 generic.go:334] "Generic (PLEG): container finished" podID="303b60e5-e78f-4519-a766-069904f135a8" containerID="6020c730d65286b179e61c765d04cc5abad6d08ce7fdd4225a077646b32a1125" exitCode=0
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.708719 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"303b60e5-e78f-4519-a766-069904f135a8","Type":"ContainerDied","Data":"6020c730d65286b179e61c765d04cc5abad6d08ce7fdd4225a077646b32a1125"}
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.713999 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-795f4db4bc-jr28r" event={"ID":"08943094-b174-4891-80da-386b43fa40c6","Type":"ContainerDied","Data":"92c7b3d61198dc0208ccb8baec836ec66a2759b816097f7cf08b04c57734d9cb"}
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.714044 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="92c7b3d61198dc0208ccb8baec836ec66a2759b816097f7cf08b04c57734d9cb"
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.718919 4929 generic.go:334] "Generic (PLEG): container finished" podID="06a554db-ef62-42bb-955a-a0e69dbc575d" containerID="bccb7b314a22afc8eb3ba6d55bab66fa969a59a322c8bf7de2c054058730179a" exitCode=0
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.718957 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"06a554db-ef62-42bb-955a-a0e69dbc575d","Type":"ContainerDied","Data":"bccb7b314a22afc8eb3ba6d55bab66fa969a59a322c8bf7de2c054058730179a"}
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.740967 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-795f4db4bc-jr28r"
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.883247 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bhbk9\" (UniqueName: \"kubernetes.io/projected/08943094-b174-4891-80da-386b43fa40c6-kube-api-access-bhbk9\") pod \"08943094-b174-4891-80da-386b43fa40c6\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") "
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.883331 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-dns-svc\") pod \"08943094-b174-4891-80da-386b43fa40c6\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") "
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.883375 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-ovsdbserver-sb\") pod \"08943094-b174-4891-80da-386b43fa40c6\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") "
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.883413 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-ovsdbserver-nb\") pod \"08943094-b174-4891-80da-386b43fa40c6\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") "
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.883630 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-config\") pod \"08943094-b174-4891-80da-386b43fa40c6\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") "
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.883730 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-dns-swift-storage-0\") pod \"08943094-b174-4891-80da-386b43fa40c6\" (UID: \"08943094-b174-4891-80da-386b43fa40c6\") "
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.912319 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.912488 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08943094-b174-4891-80da-386b43fa40c6-kube-api-access-bhbk9" (OuterVolumeSpecName: "kube-api-access-bhbk9") pod "08943094-b174-4891-80da-386b43fa40c6" (UID: "08943094-b174-4891-80da-386b43fa40c6"). InnerVolumeSpecName "kube-api-access-bhbk9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.959536 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "08943094-b174-4891-80da-386b43fa40c6" (UID: "08943094-b174-4891-80da-386b43fa40c6"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.971367 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-config" (OuterVolumeSpecName: "config") pod "08943094-b174-4891-80da-386b43fa40c6" (UID: "08943094-b174-4891-80da-386b43fa40c6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.984824 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "08943094-b174-4891-80da-386b43fa40c6" (UID: "08943094-b174-4891-80da-386b43fa40c6"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.985455 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.989897 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dp5cn\" (UniqueName: \"kubernetes.io/projected/303b60e5-e78f-4519-a766-069904f135a8-kube-api-access-dp5cn\") pod \"303b60e5-e78f-4519-a766-069904f135a8\" (UID: \"303b60e5-e78f-4519-a766-069904f135a8\") "
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.989946 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06a554db-ef62-42bb-955a-a0e69dbc575d-config-data\") pod \"06a554db-ef62-42bb-955a-a0e69dbc575d\" (UID: \"06a554db-ef62-42bb-955a-a0e69dbc575d\") "
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.990027 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/06a554db-ef62-42bb-955a-a0e69dbc575d-logs\") pod \"06a554db-ef62-42bb-955a-a0e69dbc575d\" (UID: \"06a554db-ef62-42bb-955a-a0e69dbc575d\") "
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.990045 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/303b60e5-e78f-4519-a766-069904f135a8-config-data\") pod \"303b60e5-e78f-4519-a766-069904f135a8\" (UID: \"303b60e5-e78f-4519-a766-069904f135a8\") "
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.990093 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06a554db-ef62-42bb-955a-a0e69dbc575d-combined-ca-bundle\") pod \"06a554db-ef62-42bb-955a-a0e69dbc575d\" (UID: \"06a554db-ef62-42bb-955a-a0e69dbc575d\") "
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.990124 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/303b60e5-e78f-4519-a766-069904f135a8-combined-ca-bundle\") pod \"303b60e5-e78f-4519-a766-069904f135a8\" (UID: \"303b60e5-e78f-4519-a766-069904f135a8\") "
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.990141 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pdphl\" (UniqueName: \"kubernetes.io/projected/06a554db-ef62-42bb-955a-a0e69dbc575d-kube-api-access-pdphl\") pod \"06a554db-ef62-42bb-955a-a0e69dbc575d\" (UID: \"06a554db-ef62-42bb-955a-a0e69dbc575d\") "
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.990641 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-config\") on node \"crc\" DevicePath \"\""
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.990653 4929 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.990663 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bhbk9\" (UniqueName: \"kubernetes.io/projected/08943094-b174-4891-80da-386b43fa40c6-kube-api-access-bhbk9\") on node \"crc\" DevicePath \"\""
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.990671 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.991447 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06a554db-ef62-42bb-955a-a0e69dbc575d-logs" (OuterVolumeSpecName: "logs") pod "06a554db-ef62-42bb-955a-a0e69dbc575d" (UID: "06a554db-ef62-42bb-955a-a0e69dbc575d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.993507 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06a554db-ef62-42bb-955a-a0e69dbc575d-kube-api-access-pdphl" (OuterVolumeSpecName: "kube-api-access-pdphl") pod "06a554db-ef62-42bb-955a-a0e69dbc575d" (UID: "06a554db-ef62-42bb-955a-a0e69dbc575d"). InnerVolumeSpecName "kube-api-access-pdphl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.994468 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/303b60e5-e78f-4519-a766-069904f135a8-kube-api-access-dp5cn" (OuterVolumeSpecName: "kube-api-access-dp5cn") pod "303b60e5-e78f-4519-a766-069904f135a8" (UID: "303b60e5-e78f-4519-a766-069904f135a8"). InnerVolumeSpecName "kube-api-access-dp5cn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 07:53:03 crc kubenswrapper[4929]: I1122 07:53:03.996308 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "08943094-b174-4891-80da-386b43fa40c6" (UID: "08943094-b174-4891-80da-386b43fa40c6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.015232 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "08943094-b174-4891-80da-386b43fa40c6" (UID: "08943094-b174-4891-80da-386b43fa40c6"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.043414 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06a554db-ef62-42bb-955a-a0e69dbc575d-config-data" (OuterVolumeSpecName: "config-data") pod "06a554db-ef62-42bb-955a-a0e69dbc575d" (UID: "06a554db-ef62-42bb-955a-a0e69dbc575d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.057168 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/303b60e5-e78f-4519-a766-069904f135a8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "303b60e5-e78f-4519-a766-069904f135a8" (UID: "303b60e5-e78f-4519-a766-069904f135a8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.084931 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06a554db-ef62-42bb-955a-a0e69dbc575d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "06a554db-ef62-42bb-955a-a0e69dbc575d" (UID: "06a554db-ef62-42bb-955a-a0e69dbc575d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.093989 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/06a554db-ef62-42bb-955a-a0e69dbc575d-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.094124 4929 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.094137 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06a554db-ef62-42bb-955a-a0e69dbc575d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.094149 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/08943094-b174-4891-80da-386b43fa40c6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.094191 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/303b60e5-e78f-4519-a766-069904f135a8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.094202 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pdphl\" (UniqueName: \"kubernetes.io/projected/06a554db-ef62-42bb-955a-a0e69dbc575d-kube-api-access-pdphl\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.094222 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dp5cn\" (UniqueName: \"kubernetes.io/projected/303b60e5-e78f-4519-a766-069904f135a8-kube-api-access-dp5cn\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.094231 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06a554db-ef62-42bb-955a-a0e69dbc575d-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:04 crc 
kubenswrapper[4929]: I1122 07:53:04.095617 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/303b60e5-e78f-4519-a766-069904f135a8-config-data" (OuterVolumeSpecName: "config-data") pod "303b60e5-e78f-4519-a766-069904f135a8" (UID: "303b60e5-e78f-4519-a766-069904f135a8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.123778 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.124090 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="75239a0a-1b24-4e70-bea0-1e9a708f72fe" containerName="nova-cell0-conductor-conductor" containerID="cri-o://c7fbb594ec3fab471f293d07910d36cee023ae429daf31ba5c09f10bb448b578" gracePeriod=30 Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.133958 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.196458 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/303b60e5-e78f-4519-a766-069904f135a8-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.730158 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"303b60e5-e78f-4519-a766-069904f135a8","Type":"ContainerDied","Data":"6235d433bb6a3e81ae61144de5f66f5f8036fbca7edbf39dd03bc760517f2fab"} Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.730239 4929 scope.go:117] "RemoveContainer" containerID="6020c730d65286b179e61c765d04cc5abad6d08ce7fdd4225a077646b32a1125" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.730247 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.734854 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-795f4db4bc-jr28r" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.734881 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.734917 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"06a554db-ef62-42bb-955a-a0e69dbc575d","Type":"ContainerDied","Data":"bb903b44fa33f1c87ae43515d7f23c398b3bbe170c31bbec14024a97f8b3207e"} Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.750830 4929 scope.go:117] "RemoveContainer" containerID="bccb7b314a22afc8eb3ba6d55bab66fa969a59a322c8bf7de2c054058730179a" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.800137 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.803583 4929 scope.go:117] "RemoveContainer" containerID="31f8bc34e200d4b678359380d3aa9d758b1a4d18eab016df9b922a7be5e500cc" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.815692 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.826711 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-795f4db4bc-jr28r"] Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.853752 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 22 07:53:04 crc kubenswrapper[4929]: E1122 07:53:04.854257 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06a554db-ef62-42bb-955a-a0e69dbc575d" containerName="nova-api-log" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.854279 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="06a554db-ef62-42bb-955a-a0e69dbc575d" containerName="nova-api-log" Nov 22 07:53:04 crc kubenswrapper[4929]: E1122 07:53:04.854318 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06a554db-ef62-42bb-955a-a0e69dbc575d" containerName="nova-api-api" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.854326 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="06a554db-ef62-42bb-955a-a0e69dbc575d" containerName="nova-api-api" Nov 22 07:53:04 crc kubenswrapper[4929]: E1122 07:53:04.854339 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08943094-b174-4891-80da-386b43fa40c6" containerName="dnsmasq-dns" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.854347 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="08943094-b174-4891-80da-386b43fa40c6" containerName="dnsmasq-dns" Nov 22 07:53:04 crc kubenswrapper[4929]: E1122 07:53:04.854363 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="303b60e5-e78f-4519-a766-069904f135a8" containerName="nova-scheduler-scheduler" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.854371 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="303b60e5-e78f-4519-a766-069904f135a8" containerName="nova-scheduler-scheduler" Nov 22 07:53:04 crc kubenswrapper[4929]: E1122 07:53:04.854396 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08943094-b174-4891-80da-386b43fa40c6" containerName="init" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.854403 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="08943094-b174-4891-80da-386b43fa40c6" containerName="init" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.854643 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="08943094-b174-4891-80da-386b43fa40c6" containerName="dnsmasq-dns" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.854660 4929 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="06a554db-ef62-42bb-955a-a0e69dbc575d" containerName="nova-api-log" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.854685 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="303b60e5-e78f-4519-a766-069904f135a8" containerName="nova-scheduler-scheduler" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.854696 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="06a554db-ef62-42bb-955a-a0e69dbc575d" containerName="nova-api-api" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.855934 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.860645 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.871490 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-795f4db4bc-jr28r"] Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.885935 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.893443 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.909993 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4f6babc-2993-4999-a2e2-58ea1401d2ff-logs\") pod \"nova-api-0\" (UID: \"b4f6babc-2993-4999-a2e2-58ea1401d2ff\") " pod="openstack/nova-api-0" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.910248 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4f6babc-2993-4999-a2e2-58ea1401d2ff-config-data\") pod \"nova-api-0\" (UID: \"b4f6babc-2993-4999-a2e2-58ea1401d2ff\") " pod="openstack/nova-api-0" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.910280 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4f6babc-2993-4999-a2e2-58ea1401d2ff-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b4f6babc-2993-4999-a2e2-58ea1401d2ff\") " pod="openstack/nova-api-0" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.910346 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbmgm\" (UniqueName: \"kubernetes.io/projected/b4f6babc-2993-4999-a2e2-58ea1401d2ff-kube-api-access-mbmgm\") pod \"nova-api-0\" (UID: \"b4f6babc-2993-4999-a2e2-58ea1401d2ff\") " pod="openstack/nova-api-0" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.910976 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.919758 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.921136 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.924627 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 22 07:53:04 crc kubenswrapper[4929]: I1122 07:53:04.936467 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.013061 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4f6babc-2993-4999-a2e2-58ea1401d2ff-config-data\") pod \"nova-api-0\" (UID: \"b4f6babc-2993-4999-a2e2-58ea1401d2ff\") " pod="openstack/nova-api-0" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.013197 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4f6babc-2993-4999-a2e2-58ea1401d2ff-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b4f6babc-2993-4999-a2e2-58ea1401d2ff\") " pod="openstack/nova-api-0" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.014327 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a828f0a-c50e-4f76-829b-543df4ac95e9-config-data\") pod \"nova-scheduler-0\" (UID: \"4a828f0a-c50e-4f76-829b-543df4ac95e9\") " pod="openstack/nova-scheduler-0" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.014395 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mbmgm\" (UniqueName: \"kubernetes.io/projected/b4f6babc-2993-4999-a2e2-58ea1401d2ff-kube-api-access-mbmgm\") pod \"nova-api-0\" (UID: \"b4f6babc-2993-4999-a2e2-58ea1401d2ff\") " pod="openstack/nova-api-0" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.015085 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a828f0a-c50e-4f76-829b-543df4ac95e9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4a828f0a-c50e-4f76-829b-543df4ac95e9\") " pod="openstack/nova-scheduler-0" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.015319 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wc7ds\" (UniqueName: \"kubernetes.io/projected/4a828f0a-c50e-4f76-829b-543df4ac95e9-kube-api-access-wc7ds\") pod \"nova-scheduler-0\" (UID: \"4a828f0a-c50e-4f76-829b-543df4ac95e9\") " pod="openstack/nova-scheduler-0" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.015602 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4f6babc-2993-4999-a2e2-58ea1401d2ff-logs\") pod \"nova-api-0\" (UID: \"b4f6babc-2993-4999-a2e2-58ea1401d2ff\") " pod="openstack/nova-api-0" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.015980 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4f6babc-2993-4999-a2e2-58ea1401d2ff-logs\") pod \"nova-api-0\" (UID: \"b4f6babc-2993-4999-a2e2-58ea1401d2ff\") " pod="openstack/nova-api-0" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.020051 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4f6babc-2993-4999-a2e2-58ea1401d2ff-config-data\") pod \"nova-api-0\" (UID: 
\"b4f6babc-2993-4999-a2e2-58ea1401d2ff\") " pod="openstack/nova-api-0" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.021534 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4f6babc-2993-4999-a2e2-58ea1401d2ff-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b4f6babc-2993-4999-a2e2-58ea1401d2ff\") " pod="openstack/nova-api-0" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.043296 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbmgm\" (UniqueName: \"kubernetes.io/projected/b4f6babc-2993-4999-a2e2-58ea1401d2ff-kube-api-access-mbmgm\") pod \"nova-api-0\" (UID: \"b4f6babc-2993-4999-a2e2-58ea1401d2ff\") " pod="openstack/nova-api-0" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.117960 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wc7ds\" (UniqueName: \"kubernetes.io/projected/4a828f0a-c50e-4f76-829b-543df4ac95e9-kube-api-access-wc7ds\") pod \"nova-scheduler-0\" (UID: \"4a828f0a-c50e-4f76-829b-543df4ac95e9\") " pod="openstack/nova-scheduler-0" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.118160 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a828f0a-c50e-4f76-829b-543df4ac95e9-config-data\") pod \"nova-scheduler-0\" (UID: \"4a828f0a-c50e-4f76-829b-543df4ac95e9\") " pod="openstack/nova-scheduler-0" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.118199 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a828f0a-c50e-4f76-829b-543df4ac95e9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4a828f0a-c50e-4f76-829b-543df4ac95e9\") " pod="openstack/nova-scheduler-0" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.122710 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a828f0a-c50e-4f76-829b-543df4ac95e9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4a828f0a-c50e-4f76-829b-543df4ac95e9\") " pod="openstack/nova-scheduler-0" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.122945 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a828f0a-c50e-4f76-829b-543df4ac95e9-config-data\") pod \"nova-scheduler-0\" (UID: \"4a828f0a-c50e-4f76-829b-543df4ac95e9\") " pod="openstack/nova-scheduler-0" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.141945 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wc7ds\" (UniqueName: \"kubernetes.io/projected/4a828f0a-c50e-4f76-829b-543df4ac95e9-kube-api-access-wc7ds\") pod \"nova-scheduler-0\" (UID: \"4a828f0a-c50e-4f76-829b-543df4ac95e9\") " pod="openstack/nova-scheduler-0" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.180393 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.237992 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.643407 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 07:53:05 crc kubenswrapper[4929]: W1122 07:53:05.652188 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4a828f0a_c50e_4f76_829b_543df4ac95e9.slice/crio-59747d559209246afa62f890fd02a2dd69afb51d1c117ab8a8b980dba14025ba WatchSource:0}: Error finding container 59747d559209246afa62f890fd02a2dd69afb51d1c117ab8a8b980dba14025ba: Status 404 returned error can't find the container with id 59747d559209246afa62f890fd02a2dd69afb51d1c117ab8a8b980dba14025ba Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.688199 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 07:53:05 crc kubenswrapper[4929]: W1122 07:53:05.693720 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4f6babc_2993_4999_a2e2_58ea1401d2ff.slice/crio-08a3102a3079f4821ba1bfe7299dc79581f3740a0b84a4b5d597b8dcf4669356 WatchSource:0}: Error finding container 08a3102a3079f4821ba1bfe7299dc79581f3740a0b84a4b5d597b8dcf4669356: Status 404 returned error can't find the container with id 08a3102a3079f4821ba1bfe7299dc79581f3740a0b84a4b5d597b8dcf4669356 Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.755308 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4a828f0a-c50e-4f76-829b-543df4ac95e9","Type":"ContainerStarted","Data":"59747d559209246afa62f890fd02a2dd69afb51d1c117ab8a8b980dba14025ba"} Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.771601 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8fbd3c1f-519a-4c56-baf7-0c46b93453b3","Type":"ContainerStarted","Data":"acbba023a97cc137a0e856c756aa9a0da335e569ba611eb250dbb13cdf63ae83"} Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.781413 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b4f6babc-2993-4999-a2e2-58ea1401d2ff","Type":"ContainerStarted","Data":"08a3102a3079f4821ba1bfe7299dc79581f3740a0b84a4b5d597b8dcf4669356"} Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.961087 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06a554db-ef62-42bb-955a-a0e69dbc575d" path="/var/lib/kubelet/pods/06a554db-ef62-42bb-955a-a0e69dbc575d/volumes" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.962097 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08943094-b174-4891-80da-386b43fa40c6" path="/var/lib/kubelet/pods/08943094-b174-4891-80da-386b43fa40c6/volumes" Nov 22 07:53:05 crc kubenswrapper[4929]: I1122 07:53:05.962901 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="303b60e5-e78f-4519-a766-069904f135a8" path="/var/lib/kubelet/pods/303b60e5-e78f-4519-a766-069904f135a8/volumes" Nov 22 07:53:06 crc kubenswrapper[4929]: I1122 07:53:06.047479 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:53:06 crc kubenswrapper[4929]: I1122 07:53:06.047758 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7a562fe7-c613-46c8-81c5-37795061c60b" containerName="ceilometer-central-agent" 
containerID="cri-o://080cb2e25a279daf203e4da85aa709a65e6cff2f89d6e42a1c9c7b5b61e65730" gracePeriod=30 Nov 22 07:53:06 crc kubenswrapper[4929]: I1122 07:53:06.047877 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7a562fe7-c613-46c8-81c5-37795061c60b" containerName="sg-core" containerID="cri-o://6cd568d3c19d081d03ca23ea9f029dc2bf93d2ca641f62ecfdff0851876da25e" gracePeriod=30 Nov 22 07:53:06 crc kubenswrapper[4929]: I1122 07:53:06.047924 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7a562fe7-c613-46c8-81c5-37795061c60b" containerName="ceilometer-notification-agent" containerID="cri-o://a92fab0f52df75425ebdd4084e8dae90df47034e9f7da6a599d710d5f9a0651f" gracePeriod=30 Nov 22 07:53:06 crc kubenswrapper[4929]: I1122 07:53:06.047919 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7a562fe7-c613-46c8-81c5-37795061c60b" containerName="proxy-httpd" containerID="cri-o://329efd441e443fb2ef0c06f7ff68c7edd6bc9f2c35c907c14a97c01cf45a77d6" gracePeriod=30 Nov 22 07:53:06 crc kubenswrapper[4929]: I1122 07:53:06.803506 4929 generic.go:334] "Generic (PLEG): container finished" podID="7a562fe7-c613-46c8-81c5-37795061c60b" containerID="329efd441e443fb2ef0c06f7ff68c7edd6bc9f2c35c907c14a97c01cf45a77d6" exitCode=0 Nov 22 07:53:06 crc kubenswrapper[4929]: I1122 07:53:06.803796 4929 generic.go:334] "Generic (PLEG): container finished" podID="7a562fe7-c613-46c8-81c5-37795061c60b" containerID="6cd568d3c19d081d03ca23ea9f029dc2bf93d2ca641f62ecfdff0851876da25e" exitCode=2 Nov 22 07:53:06 crc kubenswrapper[4929]: I1122 07:53:06.803633 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a562fe7-c613-46c8-81c5-37795061c60b","Type":"ContainerDied","Data":"329efd441e443fb2ef0c06f7ff68c7edd6bc9f2c35c907c14a97c01cf45a77d6"} Nov 22 07:53:06 crc kubenswrapper[4929]: I1122 07:53:06.803892 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a562fe7-c613-46c8-81c5-37795061c60b","Type":"ContainerDied","Data":"6cd568d3c19d081d03ca23ea9f029dc2bf93d2ca641f62ecfdff0851876da25e"} Nov 22 07:53:06 crc kubenswrapper[4929]: I1122 07:53:06.808117 4929 generic.go:334] "Generic (PLEG): container finished" podID="75239a0a-1b24-4e70-bea0-1e9a708f72fe" containerID="c7fbb594ec3fab471f293d07910d36cee023ae429daf31ba5c09f10bb448b578" exitCode=0 Nov 22 07:53:06 crc kubenswrapper[4929]: I1122 07:53:06.808154 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"75239a0a-1b24-4e70-bea0-1e9a708f72fe","Type":"ContainerDied","Data":"c7fbb594ec3fab471f293d07910d36cee023ae429daf31ba5c09f10bb448b578"} Nov 22 07:53:06 crc kubenswrapper[4929]: I1122 07:53:06.808279 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8fbd3c1f-519a-4c56-baf7-0c46b93453b3" containerName="nova-metadata-log" containerID="cri-o://7efa7c7b0c32cc634c5f39f8aeccf28f36fcc38ae3df7e44b9a527dbfa8c0e4b" gracePeriod=30 Nov 22 07:53:06 crc kubenswrapper[4929]: I1122 07:53:06.808359 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8fbd3c1f-519a-4c56-baf7-0c46b93453b3" containerName="nova-metadata-metadata" containerID="cri-o://acbba023a97cc137a0e856c756aa9a0da335e569ba611eb250dbb13cdf63ae83" gracePeriod=30 Nov 22 07:53:06 crc 
kubenswrapper[4929]: I1122 07:53:06.840690 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=5.840673345 podStartE2EDuration="5.840673345s" podCreationTimestamp="2025-11-22 07:53:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:53:06.835766813 +0000 UTC m=+2523.945220826" watchObservedRunningTime="2025-11-22 07:53:06.840673345 +0000 UTC m=+2523.950127358" Nov 22 07:53:07 crc kubenswrapper[4929]: I1122 07:53:07.085991 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 22 07:53:07 crc kubenswrapper[4929]: I1122 07:53:07.086117 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 22 07:53:07 crc kubenswrapper[4929]: I1122 07:53:07.821569 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b4f6babc-2993-4999-a2e2-58ea1401d2ff","Type":"ContainerStarted","Data":"6be8678b67ec38d7fb0b7371b702ea78c7d70a8a70f7395905f3e7d2acc4b6e4"} Nov 22 07:53:07 crc kubenswrapper[4929]: I1122 07:53:07.824795 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4a828f0a-c50e-4f76-829b-543df4ac95e9","Type":"ContainerStarted","Data":"9cd1234296209169dea2e499b5dcca70426984e72e9c51b866640f12ad404f5d"} Nov 22 07:53:07 crc kubenswrapper[4929]: I1122 07:53:07.827067 4929 generic.go:334] "Generic (PLEG): container finished" podID="8fbd3c1f-519a-4c56-baf7-0c46b93453b3" containerID="7efa7c7b0c32cc634c5f39f8aeccf28f36fcc38ae3df7e44b9a527dbfa8c0e4b" exitCode=143 Nov 22 07:53:07 crc kubenswrapper[4929]: I1122 07:53:07.827102 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8fbd3c1f-519a-4c56-baf7-0c46b93453b3","Type":"ContainerDied","Data":"7efa7c7b0c32cc634c5f39f8aeccf28f36fcc38ae3df7e44b9a527dbfa8c0e4b"} Nov 22 07:53:08 crc kubenswrapper[4929]: E1122 07:53:08.477899 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c7fbb594ec3fab471f293d07910d36cee023ae429daf31ba5c09f10bb448b578 is running failed: container process not found" containerID="c7fbb594ec3fab471f293d07910d36cee023ae429daf31ba5c09f10bb448b578" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 22 07:53:08 crc kubenswrapper[4929]: E1122 07:53:08.478440 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c7fbb594ec3fab471f293d07910d36cee023ae429daf31ba5c09f10bb448b578 is running failed: container process not found" containerID="c7fbb594ec3fab471f293d07910d36cee023ae429daf31ba5c09f10bb448b578" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 22 07:53:08 crc kubenswrapper[4929]: E1122 07:53:08.478870 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c7fbb594ec3fab471f293d07910d36cee023ae429daf31ba5c09f10bb448b578 is running failed: container process not found" containerID="c7fbb594ec3fab471f293d07910d36cee023ae429daf31ba5c09f10bb448b578" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 22 07:53:08 crc kubenswrapper[4929]: E1122 07:53:08.478897 4929 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container 
is not created or running: checking if PID of c7fbb594ec3fab471f293d07910d36cee023ae429daf31ba5c09f10bb448b578 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="75239a0a-1b24-4e70-bea0-1e9a708f72fe" containerName="nova-cell0-conductor-conductor" Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.495017 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.516726 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-795f4db4bc-jr28r" podUID="08943094-b174-4891-80da-386b43fa40c6" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.196:5353: i/o timeout" Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.613121 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75239a0a-1b24-4e70-bea0-1e9a708f72fe-combined-ca-bundle\") pod \"75239a0a-1b24-4e70-bea0-1e9a708f72fe\" (UID: \"75239a0a-1b24-4e70-bea0-1e9a708f72fe\") " Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.613549 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kzgfx\" (UniqueName: \"kubernetes.io/projected/75239a0a-1b24-4e70-bea0-1e9a708f72fe-kube-api-access-kzgfx\") pod \"75239a0a-1b24-4e70-bea0-1e9a708f72fe\" (UID: \"75239a0a-1b24-4e70-bea0-1e9a708f72fe\") " Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.614157 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75239a0a-1b24-4e70-bea0-1e9a708f72fe-config-data\") pod \"75239a0a-1b24-4e70-bea0-1e9a708f72fe\" (UID: \"75239a0a-1b24-4e70-bea0-1e9a708f72fe\") " Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.617425 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75239a0a-1b24-4e70-bea0-1e9a708f72fe-kube-api-access-kzgfx" (OuterVolumeSpecName: "kube-api-access-kzgfx") pod "75239a0a-1b24-4e70-bea0-1e9a708f72fe" (UID: "75239a0a-1b24-4e70-bea0-1e9a708f72fe"). InnerVolumeSpecName "kube-api-access-kzgfx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.658599 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75239a0a-1b24-4e70-bea0-1e9a708f72fe-config-data" (OuterVolumeSpecName: "config-data") pod "75239a0a-1b24-4e70-bea0-1e9a708f72fe" (UID: "75239a0a-1b24-4e70-bea0-1e9a708f72fe"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.685742 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75239a0a-1b24-4e70-bea0-1e9a708f72fe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "75239a0a-1b24-4e70-bea0-1e9a708f72fe" (UID: "75239a0a-1b24-4e70-bea0-1e9a708f72fe"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.717242 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75239a0a-1b24-4e70-bea0-1e9a708f72fe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.717274 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kzgfx\" (UniqueName: \"kubernetes.io/projected/75239a0a-1b24-4e70-bea0-1e9a708f72fe-kube-api-access-kzgfx\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.717286 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75239a0a-1b24-4e70-bea0-1e9a708f72fe-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.844284 4929 generic.go:334] "Generic (PLEG): container finished" podID="7a562fe7-c613-46c8-81c5-37795061c60b" containerID="080cb2e25a279daf203e4da85aa709a65e6cff2f89d6e42a1c9c7b5b61e65730" exitCode=0 Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.844355 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a562fe7-c613-46c8-81c5-37795061c60b","Type":"ContainerDied","Data":"080cb2e25a279daf203e4da85aa709a65e6cff2f89d6e42a1c9c7b5b61e65730"} Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.846616 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"75239a0a-1b24-4e70-bea0-1e9a708f72fe","Type":"ContainerDied","Data":"a8c61050a48c28f79b577d8714628913589d257bef7228d5d1bb704a992de30d"} Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.846661 4929 scope.go:117] "RemoveContainer" containerID="c7fbb594ec3fab471f293d07910d36cee023ae429daf31ba5c09f10bb448b578" Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.846857 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.856810 4929 generic.go:334] "Generic (PLEG): container finished" podID="8fbd3c1f-519a-4c56-baf7-0c46b93453b3" containerID="acbba023a97cc137a0e856c756aa9a0da335e569ba611eb250dbb13cdf63ae83" exitCode=0 Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.858435 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8fbd3c1f-519a-4c56-baf7-0c46b93453b3","Type":"ContainerDied","Data":"acbba023a97cc137a0e856c756aa9a0da335e569ba611eb250dbb13cdf63ae83"} Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.906690 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=4.906664885 podStartE2EDuration="4.906664885s" podCreationTimestamp="2025-11-22 07:53:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:53:08.886025451 +0000 UTC m=+2525.995479464" watchObservedRunningTime="2025-11-22 07:53:08.906664885 +0000 UTC m=+2526.016118898" Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.910390 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.931370 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.943986 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 22 07:53:08 crc kubenswrapper[4929]: E1122 07:53:08.944691 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75239a0a-1b24-4e70-bea0-1e9a708f72fe" containerName="nova-cell0-conductor-conductor" Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.944712 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="75239a0a-1b24-4e70-bea0-1e9a708f72fe" containerName="nova-cell0-conductor-conductor" Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.944985 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="75239a0a-1b24-4e70-bea0-1e9a708f72fe" containerName="nova-cell0-conductor-conductor" Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.945698 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.950043 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 22 07:53:08 crc kubenswrapper[4929]: I1122 07:53:08.961493 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.015074 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.025187 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jq2qp\" (UniqueName: \"kubernetes.io/projected/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-kube-api-access-jq2qp\") pod \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.025307 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-logs\") pod \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.025332 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-nova-metadata-tls-certs\") pod \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.025356 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-combined-ca-bundle\") pod \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.025391 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-config-data\") pod \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\" (UID: \"8fbd3c1f-519a-4c56-baf7-0c46b93453b3\") " Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.025713 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3409b70-c916-47cc-92f5-e005ef66f2b8-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"c3409b70-c916-47cc-92f5-e005ef66f2b8\") " pod="openstack/nova-cell0-conductor-0" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.025757 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3409b70-c916-47cc-92f5-e005ef66f2b8-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"c3409b70-c916-47cc-92f5-e005ef66f2b8\") " pod="openstack/nova-cell0-conductor-0" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.025782 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z999k\" (UniqueName: \"kubernetes.io/projected/c3409b70-c916-47cc-92f5-e005ef66f2b8-kube-api-access-z999k\") pod \"nova-cell0-conductor-0\" (UID: \"c3409b70-c916-47cc-92f5-e005ef66f2b8\") " pod="openstack/nova-cell0-conductor-0" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.026839 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-logs" (OuterVolumeSpecName: "logs") pod "8fbd3c1f-519a-4c56-baf7-0c46b93453b3" (UID: "8fbd3c1f-519a-4c56-baf7-0c46b93453b3"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.044996 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-kube-api-access-jq2qp" (OuterVolumeSpecName: "kube-api-access-jq2qp") pod "8fbd3c1f-519a-4c56-baf7-0c46b93453b3" (UID: "8fbd3c1f-519a-4c56-baf7-0c46b93453b3"). InnerVolumeSpecName "kube-api-access-jq2qp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.061583 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-config-data" (OuterVolumeSpecName: "config-data") pod "8fbd3c1f-519a-4c56-baf7-0c46b93453b3" (UID: "8fbd3c1f-519a-4c56-baf7-0c46b93453b3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.067846 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8fbd3c1f-519a-4c56-baf7-0c46b93453b3" (UID: "8fbd3c1f-519a-4c56-baf7-0c46b93453b3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.087602 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "8fbd3c1f-519a-4c56-baf7-0c46b93453b3" (UID: "8fbd3c1f-519a-4c56-baf7-0c46b93453b3"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.128732 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3409b70-c916-47cc-92f5-e005ef66f2b8-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"c3409b70-c916-47cc-92f5-e005ef66f2b8\") " pod="openstack/nova-cell0-conductor-0" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.128866 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3409b70-c916-47cc-92f5-e005ef66f2b8-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"c3409b70-c916-47cc-92f5-e005ef66f2b8\") " pod="openstack/nova-cell0-conductor-0" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.128910 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z999k\" (UniqueName: \"kubernetes.io/projected/c3409b70-c916-47cc-92f5-e005ef66f2b8-kube-api-access-z999k\") pod \"nova-cell0-conductor-0\" (UID: \"c3409b70-c916-47cc-92f5-e005ef66f2b8\") " pod="openstack/nova-cell0-conductor-0" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.129162 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jq2qp\" (UniqueName: \"kubernetes.io/projected/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-kube-api-access-jq2qp\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.129182 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.129191 4929 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.129201 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.129224 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8fbd3c1f-519a-4c56-baf7-0c46b93453b3-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.133040 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3409b70-c916-47cc-92f5-e005ef66f2b8-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"c3409b70-c916-47cc-92f5-e005ef66f2b8\") " pod="openstack/nova-cell0-conductor-0" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.133160 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3409b70-c916-47cc-92f5-e005ef66f2b8-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"c3409b70-c916-47cc-92f5-e005ef66f2b8\") " pod="openstack/nova-cell0-conductor-0" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.147768 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z999k\" (UniqueName: \"kubernetes.io/projected/c3409b70-c916-47cc-92f5-e005ef66f2b8-kube-api-access-z999k\") pod 
\"nova-cell0-conductor-0\" (UID: \"c3409b70-c916-47cc-92f5-e005ef66f2b8\") " pod="openstack/nova-cell0-conductor-0" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.301117 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.753228 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.868280 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"c3409b70-c916-47cc-92f5-e005ef66f2b8","Type":"ContainerStarted","Data":"42ab4b4f72911b9541c7aa55e9ef7f8559a8d8d7593c9352e70add510b279c14"} Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.870460 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8fbd3c1f-519a-4c56-baf7-0c46b93453b3","Type":"ContainerDied","Data":"65a85179b924d328591c49476b6883e41a73fd42ea308f28ae515c4b770f0298"} Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.870511 4929 scope.go:117] "RemoveContainer" containerID="acbba023a97cc137a0e856c756aa9a0da335e569ba611eb250dbb13cdf63ae83" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.870470 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.912510 4929 scope.go:117] "RemoveContainer" containerID="7efa7c7b0c32cc634c5f39f8aeccf28f36fcc38ae3df7e44b9a527dbfa8c0e4b" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.927617 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.962491 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75239a0a-1b24-4e70-bea0-1e9a708f72fe" path="/var/lib/kubelet/pods/75239a0a-1b24-4e70-bea0-1e9a708f72fe/volumes" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.963004 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.963034 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:53:09 crc kubenswrapper[4929]: E1122 07:53:09.963335 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fbd3c1f-519a-4c56-baf7-0c46b93453b3" containerName="nova-metadata-log" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.963351 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fbd3c1f-519a-4c56-baf7-0c46b93453b3" containerName="nova-metadata-log" Nov 22 07:53:09 crc kubenswrapper[4929]: E1122 07:53:09.963371 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fbd3c1f-519a-4c56-baf7-0c46b93453b3" containerName="nova-metadata-metadata" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.963377 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fbd3c1f-519a-4c56-baf7-0c46b93453b3" containerName="nova-metadata-metadata" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.963581 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fbd3c1f-519a-4c56-baf7-0c46b93453b3" containerName="nova-metadata-metadata" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.963598 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fbd3c1f-519a-4c56-baf7-0c46b93453b3" containerName="nova-metadata-log" Nov 22 07:53:09 crc 
kubenswrapper[4929]: I1122 07:53:09.964662 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.966846 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.967685 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 22 07:53:09 crc kubenswrapper[4929]: I1122 07:53:09.971891 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.047540 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/573b60bf-1459-4e7f-9c71-f453e0d01074-config-data\") pod \"nova-metadata-0\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " pod="openstack/nova-metadata-0" Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.047811 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/573b60bf-1459-4e7f-9c71-f453e0d01074-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " pod="openstack/nova-metadata-0" Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.047912 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/573b60bf-1459-4e7f-9c71-f453e0d01074-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " pod="openstack/nova-metadata-0" Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.047954 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhlpt\" (UniqueName: \"kubernetes.io/projected/573b60bf-1459-4e7f-9c71-f453e0d01074-kube-api-access-qhlpt\") pod \"nova-metadata-0\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " pod="openstack/nova-metadata-0" Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.047990 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/573b60bf-1459-4e7f-9c71-f453e0d01074-logs\") pod \"nova-metadata-0\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " pod="openstack/nova-metadata-0" Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.149836 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/573b60bf-1459-4e7f-9c71-f453e0d01074-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " pod="openstack/nova-metadata-0" Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.150323 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/573b60bf-1459-4e7f-9c71-f453e0d01074-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " pod="openstack/nova-metadata-0" Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.150362 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhlpt\" (UniqueName: 
\"kubernetes.io/projected/573b60bf-1459-4e7f-9c71-f453e0d01074-kube-api-access-qhlpt\") pod \"nova-metadata-0\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " pod="openstack/nova-metadata-0" Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.150395 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/573b60bf-1459-4e7f-9c71-f453e0d01074-logs\") pod \"nova-metadata-0\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " pod="openstack/nova-metadata-0" Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.150441 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/573b60bf-1459-4e7f-9c71-f453e0d01074-config-data\") pod \"nova-metadata-0\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " pod="openstack/nova-metadata-0" Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.151127 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/573b60bf-1459-4e7f-9c71-f453e0d01074-logs\") pod \"nova-metadata-0\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " pod="openstack/nova-metadata-0" Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.156471 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/573b60bf-1459-4e7f-9c71-f453e0d01074-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " pod="openstack/nova-metadata-0" Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.157099 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/573b60bf-1459-4e7f-9c71-f453e0d01074-config-data\") pod \"nova-metadata-0\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " pod="openstack/nova-metadata-0" Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.160806 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/573b60bf-1459-4e7f-9c71-f453e0d01074-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " pod="openstack/nova-metadata-0" Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.169919 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhlpt\" (UniqueName: \"kubernetes.io/projected/573b60bf-1459-4e7f-9c71-f453e0d01074-kube-api-access-qhlpt\") pod \"nova-metadata-0\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " pod="openstack/nova-metadata-0" Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.238890 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.307084 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.619638 4929 scope.go:117] "RemoveContainer" containerID="4b1b298eb5c447935d1e4943ddb38c83063db90eecf04053950540937ea089a3" Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.788452 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.882994 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b4f6babc-2993-4999-a2e2-58ea1401d2ff","Type":"ContainerStarted","Data":"9096213c996bf3c7ecf74e554336238b7a89104647b8ccfde7761223c6b8121c"} Nov 22 07:53:10 crc kubenswrapper[4929]: W1122 07:53:10.929694 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod573b60bf_1459_4e7f_9c71_f453e0d01074.slice/crio-10f865caffe753521ff9106580f6cfe084b5c4d3527be6cefcca6a44e913ac8c WatchSource:0}: Error finding container 10f865caffe753521ff9106580f6cfe084b5c4d3527be6cefcca6a44e913ac8c: Status 404 returned error can't find the container with id 10f865caffe753521ff9106580f6cfe084b5c4d3527be6cefcca6a44e913ac8c Nov 22 07:53:10 crc kubenswrapper[4929]: I1122 07:53:10.948402 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" Nov 22 07:53:10 crc kubenswrapper[4929]: E1122 07:53:10.948681 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:53:11 crc kubenswrapper[4929]: I1122 07:53:11.893573 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"c3409b70-c916-47cc-92f5-e005ef66f2b8","Type":"ContainerStarted","Data":"f2f0d928b4cc8dd82a0f064d5cc4f83cd8eda255dd45db9b3d0102329c280f28"} Nov 22 07:53:11 crc kubenswrapper[4929]: I1122 07:53:11.895610 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"573b60bf-1459-4e7f-9c71-f453e0d01074","Type":"ContainerStarted","Data":"10f865caffe753521ff9106580f6cfe084b5c4d3527be6cefcca6a44e913ac8c"} Nov 22 07:53:11 crc kubenswrapper[4929]: I1122 07:53:11.920902 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=7.920881831 podStartE2EDuration="7.920881831s" podCreationTimestamp="2025-11-22 07:53:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:53:11.917171908 +0000 UTC m=+2529.026625931" watchObservedRunningTime="2025-11-22 07:53:11.920881831 +0000 UTC m=+2529.030335844" Nov 22 07:53:11 crc kubenswrapper[4929]: I1122 07:53:11.965987 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8fbd3c1f-519a-4c56-baf7-0c46b93453b3" path="/var/lib/kubelet/pods/8fbd3c1f-519a-4c56-baf7-0c46b93453b3/volumes" Nov 22 07:53:12 crc kubenswrapper[4929]: I1122 07:53:12.908494 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"573b60bf-1459-4e7f-9c71-f453e0d01074","Type":"ContainerStarted","Data":"9cc1a9f10ad5a4e6b7ace51ecf6ed2bd633bf22967c135c41dc4aaa5de6301fd"} Nov 22 07:53:12 crc kubenswrapper[4929]: I1122 07:53:12.908905 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 22 07:53:12 crc kubenswrapper[4929]: I1122 07:53:12.931250 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=4.931205012 podStartE2EDuration="4.931205012s" podCreationTimestamp="2025-11-22 07:53:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:53:12.92508519 +0000 UTC m=+2530.034539233" watchObservedRunningTime="2025-11-22 07:53:12.931205012 +0000 UTC m=+2530.040659035" Nov 22 07:53:15 crc kubenswrapper[4929]: I1122 07:53:15.181597 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 07:53:15 crc kubenswrapper[4929]: I1122 07:53:15.182002 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 07:53:15 crc kubenswrapper[4929]: I1122 07:53:15.239148 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 22 07:53:15 crc kubenswrapper[4929]: I1122 07:53:15.281512 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 22 07:53:15 crc kubenswrapper[4929]: I1122 07:53:15.991869 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 22 07:53:16 crc kubenswrapper[4929]: I1122 07:53:16.264433 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b4f6babc-2993-4999-a2e2-58ea1401d2ff" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.226:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 07:53:16 crc kubenswrapper[4929]: I1122 07:53:16.264477 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b4f6babc-2993-4999-a2e2-58ea1401d2ff" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.226:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 07:53:16 crc kubenswrapper[4929]: I1122 07:53:16.972266 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a562fe7-c613-46c8-81c5-37795061c60b","Type":"ContainerDied","Data":"a92fab0f52df75425ebdd4084e8dae90df47034e9f7da6a599d710d5f9a0651f"} Nov 22 07:53:16 crc kubenswrapper[4929]: I1122 07:53:16.972344 4929 generic.go:334] "Generic (PLEG): container finished" podID="7a562fe7-c613-46c8-81c5-37795061c60b" containerID="a92fab0f52df75425ebdd4084e8dae90df47034e9f7da6a599d710d5f9a0651f" exitCode=0 Nov 22 07:53:16 crc kubenswrapper[4929]: I1122 07:53:16.975521 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"573b60bf-1459-4e7f-9c71-f453e0d01074","Type":"ContainerStarted","Data":"194f960d8a64566c0a5ace7ef58baa60f2f8d9cb1d0ba51ee8c085477085ee43"} Nov 22 07:53:17 crc kubenswrapper[4929]: I1122 07:53:17.004130 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=8.004103622 podStartE2EDuration="8.004103622s" 
podCreationTimestamp="2025-11-22 07:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:53:16.994723779 +0000 UTC m=+2534.104177832" watchObservedRunningTime="2025-11-22 07:53:17.004103622 +0000 UTC m=+2534.113557635" Nov 22 07:53:18 crc kubenswrapper[4929]: I1122 07:53:18.971270 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:53:18 crc kubenswrapper[4929]: I1122 07:53:18.996089 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7a562fe7-c613-46c8-81c5-37795061c60b","Type":"ContainerDied","Data":"e82d81ca244fc9e3390a9ac2480c149ce427a2684cb13753ef903eb2da3ea618"} Nov 22 07:53:18 crc kubenswrapper[4929]: I1122 07:53:18.996157 4929 scope.go:117] "RemoveContainer" containerID="329efd441e443fb2ef0c06f7ff68c7edd6bc9f2c35c907c14a97c01cf45a77d6" Nov 22 07:53:18 crc kubenswrapper[4929]: I1122 07:53:18.996388 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.020927 4929 scope.go:117] "RemoveContainer" containerID="6cd568d3c19d081d03ca23ea9f029dc2bf93d2ca641f62ecfdff0851876da25e" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.040579 4929 scope.go:117] "RemoveContainer" containerID="a92fab0f52df75425ebdd4084e8dae90df47034e9f7da6a599d710d5f9a0651f" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.059331 4929 scope.go:117] "RemoveContainer" containerID="080cb2e25a279daf203e4da85aa709a65e6cff2f89d6e42a1c9c7b5b61e65730" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.136280 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fbmwl\" (UniqueName: \"kubernetes.io/projected/7a562fe7-c613-46c8-81c5-37795061c60b-kube-api-access-fbmwl\") pod \"7a562fe7-c613-46c8-81c5-37795061c60b\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.136349 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a562fe7-c613-46c8-81c5-37795061c60b-log-httpd\") pod \"7a562fe7-c613-46c8-81c5-37795061c60b\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.136392 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-ceilometer-tls-certs\") pod \"7a562fe7-c613-46c8-81c5-37795061c60b\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.136425 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-sg-core-conf-yaml\") pod \"7a562fe7-c613-46c8-81c5-37795061c60b\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.136534 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-scripts\") pod \"7a562fe7-c613-46c8-81c5-37795061c60b\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.136585 4929 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-combined-ca-bundle\") pod \"7a562fe7-c613-46c8-81c5-37795061c60b\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.136646 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-config-data\") pod \"7a562fe7-c613-46c8-81c5-37795061c60b\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.136755 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a562fe7-c613-46c8-81c5-37795061c60b-run-httpd\") pod \"7a562fe7-c613-46c8-81c5-37795061c60b\" (UID: \"7a562fe7-c613-46c8-81c5-37795061c60b\") " Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.138901 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a562fe7-c613-46c8-81c5-37795061c60b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7a562fe7-c613-46c8-81c5-37795061c60b" (UID: "7a562fe7-c613-46c8-81c5-37795061c60b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.139380 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a562fe7-c613-46c8-81c5-37795061c60b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7a562fe7-c613-46c8-81c5-37795061c60b" (UID: "7a562fe7-c613-46c8-81c5-37795061c60b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.142991 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a562fe7-c613-46c8-81c5-37795061c60b-kube-api-access-fbmwl" (OuterVolumeSpecName: "kube-api-access-fbmwl") pod "7a562fe7-c613-46c8-81c5-37795061c60b" (UID: "7a562fe7-c613-46c8-81c5-37795061c60b"). InnerVolumeSpecName "kube-api-access-fbmwl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.144435 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-scripts" (OuterVolumeSpecName: "scripts") pod "7a562fe7-c613-46c8-81c5-37795061c60b" (UID: "7a562fe7-c613-46c8-81c5-37795061c60b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.168667 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7a562fe7-c613-46c8-81c5-37795061c60b" (UID: "7a562fe7-c613-46c8-81c5-37795061c60b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.191697 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "7a562fe7-c613-46c8-81c5-37795061c60b" (UID: "7a562fe7-c613-46c8-81c5-37795061c60b"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.238837 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.239044 4929 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a562fe7-c613-46c8-81c5-37795061c60b-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.239122 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fbmwl\" (UniqueName: \"kubernetes.io/projected/7a562fe7-c613-46c8-81c5-37795061c60b-kube-api-access-fbmwl\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.239243 4929 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7a562fe7-c613-46c8-81c5-37795061c60b-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.239319 4929 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.239390 4929 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.248304 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7a562fe7-c613-46c8-81c5-37795061c60b" (UID: "7a562fe7-c613-46c8-81c5-37795061c60b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.248295 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-config-data" (OuterVolumeSpecName: "config-data") pod "7a562fe7-c613-46c8-81c5-37795061c60b" (UID: "7a562fe7-c613-46c8-81c5-37795061c60b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.343128 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.343540 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.343575 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a562fe7-c613-46c8-81c5-37795061c60b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.354330 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.368992 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:53:19 crc kubenswrapper[4929]: E1122 07:53:19.371189 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a562fe7-c613-46c8-81c5-37795061c60b" containerName="ceilometer-central-agent" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.371233 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a562fe7-c613-46c8-81c5-37795061c60b" containerName="ceilometer-central-agent" Nov 22 07:53:19 crc kubenswrapper[4929]: E1122 07:53:19.371247 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a562fe7-c613-46c8-81c5-37795061c60b" containerName="ceilometer-notification-agent" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.371253 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a562fe7-c613-46c8-81c5-37795061c60b" containerName="ceilometer-notification-agent" Nov 22 07:53:19 crc kubenswrapper[4929]: E1122 07:53:19.371264 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a562fe7-c613-46c8-81c5-37795061c60b" containerName="proxy-httpd" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.371270 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a562fe7-c613-46c8-81c5-37795061c60b" containerName="proxy-httpd" Nov 22 07:53:19 crc kubenswrapper[4929]: E1122 07:53:19.371301 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a562fe7-c613-46c8-81c5-37795061c60b" containerName="sg-core" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.371308 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a562fe7-c613-46c8-81c5-37795061c60b" containerName="sg-core" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.371494 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a562fe7-c613-46c8-81c5-37795061c60b" containerName="ceilometer-notification-agent" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.371511 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a562fe7-c613-46c8-81c5-37795061c60b" containerName="ceilometer-central-agent" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.371525 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a562fe7-c613-46c8-81c5-37795061c60b" containerName="proxy-httpd" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.371540 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a562fe7-c613-46c8-81c5-37795061c60b" containerName="sg-core" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.374521 4929 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.374718 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.377149 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.377559 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.383039 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.383108 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.547355 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-scripts\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.547433 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.547494 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-log-httpd\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.547660 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-config-data\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.547733 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4sgjm\" (UniqueName: \"kubernetes.io/projected/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-kube-api-access-4sgjm\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.547819 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-run-httpd\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.548070 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 
07:53:19.548108 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.649709 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-scripts\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.649801 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.649870 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-log-httpd\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.649905 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-config-data\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.650656 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-log-httpd\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.649927 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4sgjm\" (UniqueName: \"kubernetes.io/projected/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-kube-api-access-4sgjm\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.650773 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-run-httpd\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.650874 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.650911 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc 
kubenswrapper[4929]: I1122 07:53:19.651145 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-run-httpd\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.653914 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.654273 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-config-data\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.655022 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.655013 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.655149 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-scripts\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.676865 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4sgjm\" (UniqueName: \"kubernetes.io/projected/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-kube-api-access-4sgjm\") pod \"ceilometer-0\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.702978 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:53:19 crc kubenswrapper[4929]: I1122 07:53:19.960973 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a562fe7-c613-46c8-81c5-37795061c60b" path="/var/lib/kubelet/pods/7a562fe7-c613-46c8-81c5-37795061c60b/volumes" Nov 22 07:53:20 crc kubenswrapper[4929]: I1122 07:53:20.220492 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:53:20 crc kubenswrapper[4929]: I1122 07:53:20.307642 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 22 07:53:20 crc kubenswrapper[4929]: I1122 07:53:20.307696 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 22 07:53:20 crc kubenswrapper[4929]: I1122 07:53:20.307712 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 22 07:53:20 crc kubenswrapper[4929]: I1122 07:53:20.307958 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 22 07:53:21 crc kubenswrapper[4929]: I1122 07:53:21.022156 4929 generic.go:334] "Generic (PLEG): container finished" podID="00efdb20-390d-4535-b655-232832a018a6" containerID="3d98668cbe08f8b110f81b720e5f0533b67270d72c51cd732a8d4f4e0c512727" exitCode=137 Nov 22 07:53:21 crc kubenswrapper[4929]: I1122 07:53:21.022239 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"00efdb20-390d-4535-b655-232832a018a6","Type":"ContainerDied","Data":"3d98668cbe08f8b110f81b720e5f0533b67270d72c51cd732a8d4f4e0c512727"} Nov 22 07:53:21 crc kubenswrapper[4929]: I1122 07:53:21.023705 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84080ce2-3ac9-40cb-b5d4-2674cefbfea3","Type":"ContainerStarted","Data":"65f1dbc254d3bdd4ab7a7e65de1149a6634804cdc9a53441429a028b37854c97"} Nov 22 07:53:21 crc kubenswrapper[4929]: I1122 07:53:21.321359 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="573b60bf-1459-4e7f-9c71-f453e0d01074" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.229:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 07:53:21 crc kubenswrapper[4929]: I1122 07:53:21.321512 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="573b60bf-1459-4e7f-9c71-f453e0d01074" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.229:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 07:53:21 crc kubenswrapper[4929]: I1122 07:53:21.566198 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:21 crc kubenswrapper[4929]: I1122 07:53:21.692846 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00efdb20-390d-4535-b655-232832a018a6-combined-ca-bundle\") pod \"00efdb20-390d-4535-b655-232832a018a6\" (UID: \"00efdb20-390d-4535-b655-232832a018a6\") " Nov 22 07:53:21 crc kubenswrapper[4929]: I1122 07:53:21.692927 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5wc9\" (UniqueName: \"kubernetes.io/projected/00efdb20-390d-4535-b655-232832a018a6-kube-api-access-x5wc9\") pod \"00efdb20-390d-4535-b655-232832a018a6\" (UID: \"00efdb20-390d-4535-b655-232832a018a6\") " Nov 22 07:53:21 crc kubenswrapper[4929]: I1122 07:53:21.692968 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00efdb20-390d-4535-b655-232832a018a6-config-data\") pod \"00efdb20-390d-4535-b655-232832a018a6\" (UID: \"00efdb20-390d-4535-b655-232832a018a6\") " Nov 22 07:53:21 crc kubenswrapper[4929]: I1122 07:53:21.708446 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00efdb20-390d-4535-b655-232832a018a6-kube-api-access-x5wc9" (OuterVolumeSpecName: "kube-api-access-x5wc9") pod "00efdb20-390d-4535-b655-232832a018a6" (UID: "00efdb20-390d-4535-b655-232832a018a6"). InnerVolumeSpecName "kube-api-access-x5wc9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:53:21 crc kubenswrapper[4929]: I1122 07:53:21.726318 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00efdb20-390d-4535-b655-232832a018a6-config-data" (OuterVolumeSpecName: "config-data") pod "00efdb20-390d-4535-b655-232832a018a6" (UID: "00efdb20-390d-4535-b655-232832a018a6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:21 crc kubenswrapper[4929]: I1122 07:53:21.737363 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00efdb20-390d-4535-b655-232832a018a6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "00efdb20-390d-4535-b655-232832a018a6" (UID: "00efdb20-390d-4535-b655-232832a018a6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:21 crc kubenswrapper[4929]: I1122 07:53:21.802843 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5wc9\" (UniqueName: \"kubernetes.io/projected/00efdb20-390d-4535-b655-232832a018a6-kube-api-access-x5wc9\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:21 crc kubenswrapper[4929]: I1122 07:53:21.802881 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/00efdb20-390d-4535-b655-232832a018a6-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:21 crc kubenswrapper[4929]: I1122 07:53:21.802895 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/00efdb20-390d-4535-b655-232832a018a6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.095188 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"00efdb20-390d-4535-b655-232832a018a6","Type":"ContainerDied","Data":"8da170caadc5f39e30d174541c5e41f4bd4ef511902260de374186ae78bafe5a"} Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.095585 4929 scope.go:117] "RemoveContainer" containerID="3d98668cbe08f8b110f81b720e5f0533b67270d72c51cd732a8d4f4e0c512727" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.095811 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.112918 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-8677c466cb-74g99" podUID="864b28ed-cf71-451f-8af0-61d616497ee7" containerName="neutron-api" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.124261 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-8677c466cb-74g99" podUID="864b28ed-cf71-451f-8af0-61d616497ee7" containerName="neutron-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.160807 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-8677c466cb-74g99" podUID="864b28ed-cf71-451f-8af0-61d616497ee7" containerName="neutron-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 22 07:53:22 crc kubenswrapper[4929]: E1122 07:53:22.236454 4929 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod00efdb20_390d_4535_b655_232832a018a6.slice/crio-8da170caadc5f39e30d174541c5e41f4bd4ef511902260de374186ae78bafe5a\": RecentStats: unable to find data in memory cache]" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.242300 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.258291 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.268483 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 07:53:22 crc kubenswrapper[4929]: E1122 07:53:22.268920 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00efdb20-390d-4535-b655-232832a018a6" 
containerName="nova-cell1-novncproxy-novncproxy" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.268936 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="00efdb20-390d-4535-b655-232832a018a6" containerName="nova-cell1-novncproxy-novncproxy" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.269144 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="00efdb20-390d-4535-b655-232832a018a6" containerName="nova-cell1-novncproxy-novncproxy" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.269857 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.278773 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.284558 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.284752 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.288941 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.416422 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a10d2fe-ff47-453d-a731-1b0077d89474-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a10d2fe-ff47-453d-a731-1b0077d89474\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.416663 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l57ks\" (UniqueName: \"kubernetes.io/projected/4a10d2fe-ff47-453d-a731-1b0077d89474-kube-api-access-l57ks\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a10d2fe-ff47-453d-a731-1b0077d89474\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.416751 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a10d2fe-ff47-453d-a731-1b0077d89474-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a10d2fe-ff47-453d-a731-1b0077d89474\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.416822 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a10d2fe-ff47-453d-a731-1b0077d89474-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a10d2fe-ff47-453d-a731-1b0077d89474\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.416958 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a10d2fe-ff47-453d-a731-1b0077d89474-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a10d2fe-ff47-453d-a731-1b0077d89474\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.518590 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/4a10d2fe-ff47-453d-a731-1b0077d89474-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a10d2fe-ff47-453d-a731-1b0077d89474\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.518685 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l57ks\" (UniqueName: \"kubernetes.io/projected/4a10d2fe-ff47-453d-a731-1b0077d89474-kube-api-access-l57ks\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a10d2fe-ff47-453d-a731-1b0077d89474\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.518715 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a10d2fe-ff47-453d-a731-1b0077d89474-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a10d2fe-ff47-453d-a731-1b0077d89474\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.518747 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a10d2fe-ff47-453d-a731-1b0077d89474-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a10d2fe-ff47-453d-a731-1b0077d89474\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.518825 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a10d2fe-ff47-453d-a731-1b0077d89474-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a10d2fe-ff47-453d-a731-1b0077d89474\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.527894 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a10d2fe-ff47-453d-a731-1b0077d89474-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a10d2fe-ff47-453d-a731-1b0077d89474\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.528620 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a10d2fe-ff47-453d-a731-1b0077d89474-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a10d2fe-ff47-453d-a731-1b0077d89474\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.529675 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/4a10d2fe-ff47-453d-a731-1b0077d89474-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a10d2fe-ff47-453d-a731-1b0077d89474\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.529985 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a10d2fe-ff47-453d-a731-1b0077d89474-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"4a10d2fe-ff47-453d-a731-1b0077d89474\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.538705 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l57ks\" (UniqueName: \"kubernetes.io/projected/4a10d2fe-ff47-453d-a731-1b0077d89474-kube-api-access-l57ks\") pod 
\"nova-cell1-novncproxy-0\" (UID: \"4a10d2fe-ff47-453d-a731-1b0077d89474\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:22 crc kubenswrapper[4929]: I1122 07:53:22.593600 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:23 crc kubenswrapper[4929]: I1122 07:53:23.057432 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 07:53:23 crc kubenswrapper[4929]: W1122 07:53:23.062351 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4a10d2fe_ff47_453d_a731_1b0077d89474.slice/crio-e96411ed4f1d056e4c9922628d3c4902119ad704da3a9591b0e165db9772b1db WatchSource:0}: Error finding container e96411ed4f1d056e4c9922628d3c4902119ad704da3a9591b0e165db9772b1db: Status 404 returned error can't find the container with id e96411ed4f1d056e4c9922628d3c4902119ad704da3a9591b0e165db9772b1db Nov 22 07:53:23 crc kubenswrapper[4929]: I1122 07:53:23.118444 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"4a10d2fe-ff47-453d-a731-1b0077d89474","Type":"ContainerStarted","Data":"e96411ed4f1d056e4c9922628d3c4902119ad704da3a9591b0e165db9772b1db"} Nov 22 07:53:23 crc kubenswrapper[4929]: I1122 07:53:23.965111 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00efdb20-390d-4535-b655-232832a018a6" path="/var/lib/kubelet/pods/00efdb20-390d-4535-b655-232832a018a6/volumes" Nov 22 07:53:25 crc kubenswrapper[4929]: I1122 07:53:25.168177 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-6d5d9c9567-b7l75" podUID="b2c46869-321d-4fb4-bc99-a9f61f36a236" containerName="neutron-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 22 07:53:25 crc kubenswrapper[4929]: I1122 07:53:25.170043 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-6d5d9c9567-b7l75" podUID="b2c46869-321d-4fb4-bc99-a9f61f36a236" containerName="neutron-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 22 07:53:25 crc kubenswrapper[4929]: I1122 07:53:25.170274 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-6d5d9c9567-b7l75" podUID="b2c46869-321d-4fb4-bc99-a9f61f36a236" containerName="neutron-api" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 22 07:53:25 crc kubenswrapper[4929]: I1122 07:53:25.186372 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 22 07:53:25 crc kubenswrapper[4929]: I1122 07:53:25.187362 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 22 07:53:25 crc kubenswrapper[4929]: I1122 07:53:25.187938 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 22 07:53:25 crc kubenswrapper[4929]: I1122 07:53:25.191518 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 22 07:53:25 crc kubenswrapper[4929]: I1122 07:53:25.948666 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" Nov 22 07:53:25 crc kubenswrapper[4929]: E1122 07:53:25.948978 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.152712 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"4a10d2fe-ff47-453d-a731-1b0077d89474","Type":"ContainerStarted","Data":"4421b21bcd4f0b50af826bec553bc7a398c8acfd4e8d84c7857d715977003d98"} Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.152950 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.156837 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.328873 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-w5q2v"] Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.331090 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.361105 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-w5q2v"] Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.503108 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df8d4d7c-d69a-487e-87ad-04d59572e2ec-config\") pod \"dnsmasq-dns-89c5cd4d5-w5q2v\" (UID: \"df8d4d7c-d69a-487e-87ad-04d59572e2ec\") " pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.503424 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/df8d4d7c-d69a-487e-87ad-04d59572e2ec-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-w5q2v\" (UID: \"df8d4d7c-d69a-487e-87ad-04d59572e2ec\") " pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.503450 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df8d4d7c-d69a-487e-87ad-04d59572e2ec-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-w5q2v\" (UID: \"df8d4d7c-d69a-487e-87ad-04d59572e2ec\") " pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.503467 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtfkq\" (UniqueName: \"kubernetes.io/projected/df8d4d7c-d69a-487e-87ad-04d59572e2ec-kube-api-access-dtfkq\") pod \"dnsmasq-dns-89c5cd4d5-w5q2v\" (UID: \"df8d4d7c-d69a-487e-87ad-04d59572e2ec\") " pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.503504 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df8d4d7c-d69a-487e-87ad-04d59572e2ec-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-w5q2v\" (UID: \"df8d4d7c-d69a-487e-87ad-04d59572e2ec\") " pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.503538 4929 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df8d4d7c-d69a-487e-87ad-04d59572e2ec-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-w5q2v\" (UID: \"df8d4d7c-d69a-487e-87ad-04d59572e2ec\") " pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.605394 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df8d4d7c-d69a-487e-87ad-04d59572e2ec-config\") pod \"dnsmasq-dns-89c5cd4d5-w5q2v\" (UID: \"df8d4d7c-d69a-487e-87ad-04d59572e2ec\") " pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.605490 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/df8d4d7c-d69a-487e-87ad-04d59572e2ec-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-w5q2v\" (UID: \"df8d4d7c-d69a-487e-87ad-04d59572e2ec\") " pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.605521 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df8d4d7c-d69a-487e-87ad-04d59572e2ec-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-w5q2v\" (UID: \"df8d4d7c-d69a-487e-87ad-04d59572e2ec\") " pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.605549 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtfkq\" (UniqueName: \"kubernetes.io/projected/df8d4d7c-d69a-487e-87ad-04d59572e2ec-kube-api-access-dtfkq\") pod \"dnsmasq-dns-89c5cd4d5-w5q2v\" (UID: \"df8d4d7c-d69a-487e-87ad-04d59572e2ec\") " pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.605601 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df8d4d7c-d69a-487e-87ad-04d59572e2ec-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-w5q2v\" (UID: \"df8d4d7c-d69a-487e-87ad-04d59572e2ec\") " pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.605652 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df8d4d7c-d69a-487e-87ad-04d59572e2ec-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-w5q2v\" (UID: \"df8d4d7c-d69a-487e-87ad-04d59572e2ec\") " pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.606511 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df8d4d7c-d69a-487e-87ad-04d59572e2ec-config\") pod \"dnsmasq-dns-89c5cd4d5-w5q2v\" (UID: \"df8d4d7c-d69a-487e-87ad-04d59572e2ec\") " pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.606511 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/df8d4d7c-d69a-487e-87ad-04d59572e2ec-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-w5q2v\" (UID: \"df8d4d7c-d69a-487e-87ad-04d59572e2ec\") " pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.607184 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/df8d4d7c-d69a-487e-87ad-04d59572e2ec-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-w5q2v\" (UID: \"df8d4d7c-d69a-487e-87ad-04d59572e2ec\") " pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.607445 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df8d4d7c-d69a-487e-87ad-04d59572e2ec-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-w5q2v\" (UID: \"df8d4d7c-d69a-487e-87ad-04d59572e2ec\") " pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.607484 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df8d4d7c-d69a-487e-87ad-04d59572e2ec-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-w5q2v\" (UID: \"df8d4d7c-d69a-487e-87ad-04d59572e2ec\") " pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.626413 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtfkq\" (UniqueName: \"kubernetes.io/projected/df8d4d7c-d69a-487e-87ad-04d59572e2ec-kube-api-access-dtfkq\") pod \"dnsmasq-dns-89c5cd4d5-w5q2v\" (UID: \"df8d4d7c-d69a-487e-87ad-04d59572e2ec\") " pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:26 crc kubenswrapper[4929]: I1122 07:53:26.668307 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:27 crc kubenswrapper[4929]: I1122 07:53:27.191248 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=5.191223369 podStartE2EDuration="5.191223369s" podCreationTimestamp="2025-11-22 07:53:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:53:27.180912683 +0000 UTC m=+2544.290366696" watchObservedRunningTime="2025-11-22 07:53:27.191223369 +0000 UTC m=+2544.300677382" Nov 22 07:53:27 crc kubenswrapper[4929]: I1122 07:53:27.594564 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:28 crc kubenswrapper[4929]: I1122 07:53:28.840619 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 07:53:29 crc kubenswrapper[4929]: I1122 07:53:29.178145 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b4f6babc-2993-4999-a2e2-58ea1401d2ff" containerName="nova-api-log" containerID="cri-o://6be8678b67ec38d7fb0b7371b702ea78c7d70a8a70f7395905f3e7d2acc4b6e4" gracePeriod=30 Nov 22 07:53:29 crc kubenswrapper[4929]: I1122 07:53:29.178245 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b4f6babc-2993-4999-a2e2-58ea1401d2ff" containerName="nova-api-api" containerID="cri-o://9096213c996bf3c7ecf74e554336238b7a89104647b8ccfde7761223c6b8121c" gracePeriod=30 Nov 22 07:53:30 crc kubenswrapper[4929]: I1122 07:53:30.312838 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 22 07:53:30 crc kubenswrapper[4929]: I1122 07:53:30.318816 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 22 07:53:30 crc kubenswrapper[4929]: I1122 07:53:30.330754 4929 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 22 07:53:31 crc kubenswrapper[4929]: I1122 07:53:31.197298 4929 generic.go:334] "Generic (PLEG): container finished" podID="b4f6babc-2993-4999-a2e2-58ea1401d2ff" containerID="6be8678b67ec38d7fb0b7371b702ea78c7d70a8a70f7395905f3e7d2acc4b6e4" exitCode=143 Nov 22 07:53:31 crc kubenswrapper[4929]: I1122 07:53:31.197424 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b4f6babc-2993-4999-a2e2-58ea1401d2ff","Type":"ContainerDied","Data":"6be8678b67ec38d7fb0b7371b702ea78c7d70a8a70f7395905f3e7d2acc4b6e4"} Nov 22 07:53:31 crc kubenswrapper[4929]: I1122 07:53:31.202331 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 22 07:53:31 crc kubenswrapper[4929]: I1122 07:53:31.314399 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-w5q2v"] Nov 22 07:53:31 crc kubenswrapper[4929]: W1122 07:53:31.319116 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf8d4d7c_d69a_487e_87ad_04d59572e2ec.slice/crio-399a5558b72b4c8240562dcdc6ce1ae59f2ff4a6020735d3d3a9457a85403e6e WatchSource:0}: Error finding container 399a5558b72b4c8240562dcdc6ce1ae59f2ff4a6020735d3d3a9457a85403e6e: Status 404 returned error can't find the container with id 399a5558b72b4c8240562dcdc6ce1ae59f2ff4a6020735d3d3a9457a85403e6e Nov 22 07:53:32 crc kubenswrapper[4929]: I1122 07:53:32.209144 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84080ce2-3ac9-40cb-b5d4-2674cefbfea3","Type":"ContainerStarted","Data":"570540a0454e39cd47d427b163de64205e045651fbca02789386d4cce9f16aa3"} Nov 22 07:53:32 crc kubenswrapper[4929]: I1122 07:53:32.212172 4929 generic.go:334] "Generic (PLEG): container finished" podID="df8d4d7c-d69a-487e-87ad-04d59572e2ec" containerID="ec8094749632e0ee51cfb13dd6e09f0c267a339a4cb76b4ef152abf76f382db8" exitCode=0 Nov 22 07:53:32 crc kubenswrapper[4929]: I1122 07:53:32.212284 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" event={"ID":"df8d4d7c-d69a-487e-87ad-04d59572e2ec","Type":"ContainerDied","Data":"ec8094749632e0ee51cfb13dd6e09f0c267a339a4cb76b4ef152abf76f382db8"} Nov 22 07:53:32 crc kubenswrapper[4929]: I1122 07:53:32.212338 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" event={"ID":"df8d4d7c-d69a-487e-87ad-04d59572e2ec","Type":"ContainerStarted","Data":"399a5558b72b4c8240562dcdc6ce1ae59f2ff4a6020735d3d3a9457a85403e6e"} Nov 22 07:53:32 crc kubenswrapper[4929]: I1122 07:53:32.594580 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:32 crc kubenswrapper[4929]: E1122 07:53:32.606115 4929 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4f6babc_2993_4999_a2e2_58ea1401d2ff.slice/crio-conmon-9096213c996bf3c7ecf74e554336238b7a89104647b8ccfde7761223c6b8121c.scope\": RecentStats: unable to find data in memory cache]" Nov 22 07:53:32 crc kubenswrapper[4929]: I1122 07:53:32.624322 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:32 crc kubenswrapper[4929]: I1122 07:53:32.782390 4929 util.go:48] "No ready sandbox for pod 
can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 07:53:32 crc kubenswrapper[4929]: I1122 07:53:32.954351 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4f6babc-2993-4999-a2e2-58ea1401d2ff-config-data\") pod \"b4f6babc-2993-4999-a2e2-58ea1401d2ff\" (UID: \"b4f6babc-2993-4999-a2e2-58ea1401d2ff\") " Nov 22 07:53:32 crc kubenswrapper[4929]: I1122 07:53:32.954582 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4f6babc-2993-4999-a2e2-58ea1401d2ff-combined-ca-bundle\") pod \"b4f6babc-2993-4999-a2e2-58ea1401d2ff\" (UID: \"b4f6babc-2993-4999-a2e2-58ea1401d2ff\") " Nov 22 07:53:32 crc kubenswrapper[4929]: I1122 07:53:32.954643 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mbmgm\" (UniqueName: \"kubernetes.io/projected/b4f6babc-2993-4999-a2e2-58ea1401d2ff-kube-api-access-mbmgm\") pod \"b4f6babc-2993-4999-a2e2-58ea1401d2ff\" (UID: \"b4f6babc-2993-4999-a2e2-58ea1401d2ff\") " Nov 22 07:53:32 crc kubenswrapper[4929]: I1122 07:53:32.954777 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4f6babc-2993-4999-a2e2-58ea1401d2ff-logs\") pod \"b4f6babc-2993-4999-a2e2-58ea1401d2ff\" (UID: \"b4f6babc-2993-4999-a2e2-58ea1401d2ff\") " Nov 22 07:53:32 crc kubenswrapper[4929]: I1122 07:53:32.955761 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4f6babc-2993-4999-a2e2-58ea1401d2ff-logs" (OuterVolumeSpecName: "logs") pod "b4f6babc-2993-4999-a2e2-58ea1401d2ff" (UID: "b4f6babc-2993-4999-a2e2-58ea1401d2ff"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:53:32 crc kubenswrapper[4929]: I1122 07:53:32.964768 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4f6babc-2993-4999-a2e2-58ea1401d2ff-kube-api-access-mbmgm" (OuterVolumeSpecName: "kube-api-access-mbmgm") pod "b4f6babc-2993-4999-a2e2-58ea1401d2ff" (UID: "b4f6babc-2993-4999-a2e2-58ea1401d2ff"). InnerVolumeSpecName "kube-api-access-mbmgm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:53:32 crc kubenswrapper[4929]: I1122 07:53:32.990599 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4f6babc-2993-4999-a2e2-58ea1401d2ff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b4f6babc-2993-4999-a2e2-58ea1401d2ff" (UID: "b4f6babc-2993-4999-a2e2-58ea1401d2ff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.000658 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4f6babc-2993-4999-a2e2-58ea1401d2ff-config-data" (OuterVolumeSpecName: "config-data") pod "b4f6babc-2993-4999-a2e2-58ea1401d2ff" (UID: "b4f6babc-2993-4999-a2e2-58ea1401d2ff"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.057785 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4f6babc-2993-4999-a2e2-58ea1401d2ff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.057817 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mbmgm\" (UniqueName: \"kubernetes.io/projected/b4f6babc-2993-4999-a2e2-58ea1401d2ff-kube-api-access-mbmgm\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.057828 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4f6babc-2993-4999-a2e2-58ea1401d2ff-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.057836 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4f6babc-2993-4999-a2e2-58ea1401d2ff-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.224425 4929 generic.go:334] "Generic (PLEG): container finished" podID="b4f6babc-2993-4999-a2e2-58ea1401d2ff" containerID="9096213c996bf3c7ecf74e554336238b7a89104647b8ccfde7761223c6b8121c" exitCode=0 Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.224488 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b4f6babc-2993-4999-a2e2-58ea1401d2ff","Type":"ContainerDied","Data":"9096213c996bf3c7ecf74e554336238b7a89104647b8ccfde7761223c6b8121c"} Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.224515 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b4f6babc-2993-4999-a2e2-58ea1401d2ff","Type":"ContainerDied","Data":"08a3102a3079f4821ba1bfe7299dc79581f3740a0b84a4b5d597b8dcf4669356"} Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.224532 4929 scope.go:117] "RemoveContainer" containerID="9096213c996bf3c7ecf74e554336238b7a89104647b8ccfde7761223c6b8121c" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.224647 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.228108 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" event={"ID":"df8d4d7c-d69a-487e-87ad-04d59572e2ec","Type":"ContainerStarted","Data":"f6ed505d185fb329a5f701384f7ec5b0e2575fb1fd8de568987d6d0ec91b72d1"} Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.228761 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.258830 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" podStartSLOduration=7.258808265 podStartE2EDuration="7.258808265s" podCreationTimestamp="2025-11-22 07:53:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:53:33.256523359 +0000 UTC m=+2550.365977372" watchObservedRunningTime="2025-11-22 07:53:33.258808265 +0000 UTC m=+2550.368262288" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.279388 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.289882 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.296639 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.300115 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 22 07:53:33 crc kubenswrapper[4929]: E1122 07:53:33.300636 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4f6babc-2993-4999-a2e2-58ea1401d2ff" containerName="nova-api-log" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.300660 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4f6babc-2993-4999-a2e2-58ea1401d2ff" containerName="nova-api-log" Nov 22 07:53:33 crc kubenswrapper[4929]: E1122 07:53:33.300689 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4f6babc-2993-4999-a2e2-58ea1401d2ff" containerName="nova-api-api" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.300697 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4f6babc-2993-4999-a2e2-58ea1401d2ff" containerName="nova-api-api" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.300929 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4f6babc-2993-4999-a2e2-58ea1401d2ff" containerName="nova-api-api" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.300971 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4f6babc-2993-4999-a2e2-58ea1401d2ff" containerName="nova-api-log" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.302064 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.306696 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.307442 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.307564 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.349070 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.391181 4929 scope.go:117] "RemoveContainer" containerID="6be8678b67ec38d7fb0b7371b702ea78c7d70a8a70f7395905f3e7d2acc4b6e4" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.467042 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.467167 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.467238 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3e1faa08-de2a-4f06-a9a6-65245d240c19-logs\") pod \"nova-api-0\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.467313 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-config-data\") pod \"nova-api-0\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.467449 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-public-tls-certs\") pod \"nova-api-0\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.467512 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmg8l\" (UniqueName: \"kubernetes.io/projected/3e1faa08-de2a-4f06-a9a6-65245d240c19-kube-api-access-kmg8l\") pod \"nova-api-0\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.526968 4929 scope.go:117] "RemoveContainer" containerID="9096213c996bf3c7ecf74e554336238b7a89104647b8ccfde7761223c6b8121c" Nov 22 07:53:33 crc kubenswrapper[4929]: E1122 07:53:33.527447 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9096213c996bf3c7ecf74e554336238b7a89104647b8ccfde7761223c6b8121c\": container with 
ID starting with 9096213c996bf3c7ecf74e554336238b7a89104647b8ccfde7761223c6b8121c not found: ID does not exist" containerID="9096213c996bf3c7ecf74e554336238b7a89104647b8ccfde7761223c6b8121c" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.527517 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9096213c996bf3c7ecf74e554336238b7a89104647b8ccfde7761223c6b8121c"} err="failed to get container status \"9096213c996bf3c7ecf74e554336238b7a89104647b8ccfde7761223c6b8121c\": rpc error: code = NotFound desc = could not find container \"9096213c996bf3c7ecf74e554336238b7a89104647b8ccfde7761223c6b8121c\": container with ID starting with 9096213c996bf3c7ecf74e554336238b7a89104647b8ccfde7761223c6b8121c not found: ID does not exist" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.527553 4929 scope.go:117] "RemoveContainer" containerID="6be8678b67ec38d7fb0b7371b702ea78c7d70a8a70f7395905f3e7d2acc4b6e4" Nov 22 07:53:33 crc kubenswrapper[4929]: E1122 07:53:33.528716 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6be8678b67ec38d7fb0b7371b702ea78c7d70a8a70f7395905f3e7d2acc4b6e4\": container with ID starting with 6be8678b67ec38d7fb0b7371b702ea78c7d70a8a70f7395905f3e7d2acc4b6e4 not found: ID does not exist" containerID="6be8678b67ec38d7fb0b7371b702ea78c7d70a8a70f7395905f3e7d2acc4b6e4" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.528760 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6be8678b67ec38d7fb0b7371b702ea78c7d70a8a70f7395905f3e7d2acc4b6e4"} err="failed to get container status \"6be8678b67ec38d7fb0b7371b702ea78c7d70a8a70f7395905f3e7d2acc4b6e4\": rpc error: code = NotFound desc = could not find container \"6be8678b67ec38d7fb0b7371b702ea78c7d70a8a70f7395905f3e7d2acc4b6e4\": container with ID starting with 6be8678b67ec38d7fb0b7371b702ea78c7d70a8a70f7395905f3e7d2acc4b6e4 not found: ID does not exist" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.570955 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-config-data\") pod \"nova-api-0\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.571094 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-public-tls-certs\") pod \"nova-api-0\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.571160 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmg8l\" (UniqueName: \"kubernetes.io/projected/3e1faa08-de2a-4f06-a9a6-65245d240c19-kube-api-access-kmg8l\") pod \"nova-api-0\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.571261 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.571316 4929 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.571357 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3e1faa08-de2a-4f06-a9a6-65245d240c19-logs\") pod \"nova-api-0\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.571823 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3e1faa08-de2a-4f06-a9a6-65245d240c19-logs\") pod \"nova-api-0\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.576413 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-config-data\") pod \"nova-api-0\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.577232 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.577719 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.578319 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-public-tls-certs\") pod \"nova-api-0\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.594665 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmg8l\" (UniqueName: \"kubernetes.io/projected/3e1faa08-de2a-4f06-a9a6-65245d240c19-kube-api-access-kmg8l\") pod \"nova-api-0\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.624700 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 07:53:33 crc kubenswrapper[4929]: I1122 07:53:33.965538 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4f6babc-2993-4999-a2e2-58ea1401d2ff" path="/var/lib/kubelet/pods/b4f6babc-2993-4999-a2e2-58ea1401d2ff/volumes" Nov 22 07:53:34 crc kubenswrapper[4929]: I1122 07:53:34.174420 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 07:53:34 crc kubenswrapper[4929]: I1122 07:53:34.264550 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84080ce2-3ac9-40cb-b5d4-2674cefbfea3","Type":"ContainerStarted","Data":"112dd1cde7025f6cd26eb94cb06ec11ed38cf7854b5aea3de0630122e7ebf6e0"} Nov 22 07:53:34 crc kubenswrapper[4929]: I1122 07:53:34.270144 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3e1faa08-de2a-4f06-a9a6-65245d240c19","Type":"ContainerStarted","Data":"5b3148656e6feebe7615b8734d4da2e569ac96ca7268af41ed00ad2e61f746ea"} Nov 22 07:53:34 crc kubenswrapper[4929]: I1122 07:53:34.659806 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:53:35 crc kubenswrapper[4929]: I1122 07:53:35.281515 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3e1faa08-de2a-4f06-a9a6-65245d240c19","Type":"ContainerStarted","Data":"53f66bc6bd9d41dc496eeb0f4707d32dd1234f8d504b7dc161063c856bd0bd6a"} Nov 22 07:53:35 crc kubenswrapper[4929]: I1122 07:53:35.281874 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3e1faa08-de2a-4f06-a9a6-65245d240c19","Type":"ContainerStarted","Data":"98b105b96ea235fdbd4187e5f2c6dc967d9791bc17e200e03c5188e56e832202"} Nov 22 07:53:35 crc kubenswrapper[4929]: I1122 07:53:35.288736 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84080ce2-3ac9-40cb-b5d4-2674cefbfea3","Type":"ContainerStarted","Data":"c5003a790fe5d9814ac1325156007fa0bfe6d0a83b3b82189fdf62ffa0b95a4f"} Nov 22 07:53:35 crc kubenswrapper[4929]: I1122 07:53:35.308574 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.308550662 podStartE2EDuration="2.308550662s" podCreationTimestamp="2025-11-22 07:53:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:53:35.304673215 +0000 UTC m=+2552.414127228" watchObservedRunningTime="2025-11-22 07:53:35.308550662 +0000 UTC m=+2552.418004675" Nov 22 07:53:37 crc kubenswrapper[4929]: I1122 07:53:37.307872 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84080ce2-3ac9-40cb-b5d4-2674cefbfea3","Type":"ContainerStarted","Data":"03a64f9e9e270bb3aaf7560a7bb580832c2b0538b9c46c59d8f4bc575b7cc191"} Nov 22 07:53:37 crc kubenswrapper[4929]: I1122 07:53:37.308443 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 07:53:37 crc kubenswrapper[4929]: I1122 07:53:37.308090 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerName="sg-core" containerID="cri-o://c5003a790fe5d9814ac1325156007fa0bfe6d0a83b3b82189fdf62ffa0b95a4f" gracePeriod=30 Nov 22 07:53:37 crc kubenswrapper[4929]: I1122 07:53:37.308009 4929 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openstack/ceilometer-0" podUID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerName="ceilometer-central-agent" containerID="cri-o://570540a0454e39cd47d427b163de64205e045651fbca02789386d4cce9f16aa3" gracePeriod=30 Nov 22 07:53:37 crc kubenswrapper[4929]: I1122 07:53:37.308127 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerName="ceilometer-notification-agent" containerID="cri-o://112dd1cde7025f6cd26eb94cb06ec11ed38cf7854b5aea3de0630122e7ebf6e0" gracePeriod=30 Nov 22 07:53:37 crc kubenswrapper[4929]: I1122 07:53:37.308114 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerName="proxy-httpd" containerID="cri-o://03a64f9e9e270bb3aaf7560a7bb580832c2b0538b9c46c59d8f4bc575b7cc191" gracePeriod=30 Nov 22 07:53:37 crc kubenswrapper[4929]: I1122 07:53:37.331663 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.322622335 podStartE2EDuration="18.331643934s" podCreationTimestamp="2025-11-22 07:53:19 +0000 UTC" firstStartedPulling="2025-11-22 07:53:20.229319069 +0000 UTC m=+2537.338773082" lastFinishedPulling="2025-11-22 07:53:36.238340668 +0000 UTC m=+2553.347794681" observedRunningTime="2025-11-22 07:53:37.330034484 +0000 UTC m=+2554.439488497" watchObservedRunningTime="2025-11-22 07:53:37.331643934 +0000 UTC m=+2554.441097947" Nov 22 07:53:38 crc kubenswrapper[4929]: I1122 07:53:38.327883 4929 generic.go:334] "Generic (PLEG): container finished" podID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerID="03a64f9e9e270bb3aaf7560a7bb580832c2b0538b9c46c59d8f4bc575b7cc191" exitCode=0 Nov 22 07:53:38 crc kubenswrapper[4929]: I1122 07:53:38.328234 4929 generic.go:334] "Generic (PLEG): container finished" podID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerID="c5003a790fe5d9814ac1325156007fa0bfe6d0a83b3b82189fdf62ffa0b95a4f" exitCode=2 Nov 22 07:53:38 crc kubenswrapper[4929]: I1122 07:53:38.328243 4929 generic.go:334] "Generic (PLEG): container finished" podID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerID="112dd1cde7025f6cd26eb94cb06ec11ed38cf7854b5aea3de0630122e7ebf6e0" exitCode=0 Nov 22 07:53:38 crc kubenswrapper[4929]: I1122 07:53:38.327975 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84080ce2-3ac9-40cb-b5d4-2674cefbfea3","Type":"ContainerDied","Data":"03a64f9e9e270bb3aaf7560a7bb580832c2b0538b9c46c59d8f4bc575b7cc191"} Nov 22 07:53:38 crc kubenswrapper[4929]: I1122 07:53:38.328298 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84080ce2-3ac9-40cb-b5d4-2674cefbfea3","Type":"ContainerDied","Data":"c5003a790fe5d9814ac1325156007fa0bfe6d0a83b3b82189fdf62ffa0b95a4f"} Nov 22 07:53:38 crc kubenswrapper[4929]: I1122 07:53:38.328325 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84080ce2-3ac9-40cb-b5d4-2674cefbfea3","Type":"ContainerDied","Data":"112dd1cde7025f6cd26eb94cb06ec11ed38cf7854b5aea3de0630122e7ebf6e0"} Nov 22 07:53:39 crc kubenswrapper[4929]: I1122 07:53:39.948084 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" Nov 22 07:53:39 crc kubenswrapper[4929]: E1122 07:53:39.948793 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.355824 4929 generic.go:334] "Generic (PLEG): container finished" podID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerID="570540a0454e39cd47d427b163de64205e045651fbca02789386d4cce9f16aa3" exitCode=0 Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.355900 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84080ce2-3ac9-40cb-b5d4-2674cefbfea3","Type":"ContainerDied","Data":"570540a0454e39cd47d427b163de64205e045651fbca02789386d4cce9f16aa3"} Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.356287 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"84080ce2-3ac9-40cb-b5d4-2674cefbfea3","Type":"ContainerDied","Data":"65f1dbc254d3bdd4ab7a7e65de1149a6634804cdc9a53441429a028b37854c97"} Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.356313 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="65f1dbc254d3bdd4ab7a7e65de1149a6634804cdc9a53441429a028b37854c97" Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.440071 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.531377 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-combined-ca-bundle\") pod \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.531464 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-scripts\") pod \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.531511 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-run-httpd\") pod \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.531551 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-config-data\") pod \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.531598 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-log-httpd\") pod \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.531658 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-ceilometer-tls-certs\") pod \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.531743 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-sg-core-conf-yaml\") pod \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.531851 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4sgjm\" (UniqueName: \"kubernetes.io/projected/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-kube-api-access-4sgjm\") pod \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\" (UID: \"84080ce2-3ac9-40cb-b5d4-2674cefbfea3\") " Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.532157 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "84080ce2-3ac9-40cb-b5d4-2674cefbfea3" (UID: "84080ce2-3ac9-40cb-b5d4-2674cefbfea3"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.532414 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "84080ce2-3ac9-40cb-b5d4-2674cefbfea3" (UID: "84080ce2-3ac9-40cb-b5d4-2674cefbfea3"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.532858 4929 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.532881 4929 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.538709 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-kube-api-access-4sgjm" (OuterVolumeSpecName: "kube-api-access-4sgjm") pod "84080ce2-3ac9-40cb-b5d4-2674cefbfea3" (UID: "84080ce2-3ac9-40cb-b5d4-2674cefbfea3"). InnerVolumeSpecName "kube-api-access-4sgjm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.538735 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-scripts" (OuterVolumeSpecName: "scripts") pod "84080ce2-3ac9-40cb-b5d4-2674cefbfea3" (UID: "84080ce2-3ac9-40cb-b5d4-2674cefbfea3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.573361 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "84080ce2-3ac9-40cb-b5d4-2674cefbfea3" (UID: "84080ce2-3ac9-40cb-b5d4-2674cefbfea3"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.618484 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "84080ce2-3ac9-40cb-b5d4-2674cefbfea3" (UID: "84080ce2-3ac9-40cb-b5d4-2674cefbfea3"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.628011 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "84080ce2-3ac9-40cb-b5d4-2674cefbfea3" (UID: "84080ce2-3ac9-40cb-b5d4-2674cefbfea3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.635295 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4sgjm\" (UniqueName: \"kubernetes.io/projected/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-kube-api-access-4sgjm\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.635321 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.635330 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.635338 4929 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.635346 4929 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.637484 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-config-data" (OuterVolumeSpecName: "config-data") pod "84080ce2-3ac9-40cb-b5d4-2674cefbfea3" (UID: "84080ce2-3ac9-40cb-b5d4-2674cefbfea3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.671349 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-89c5cd4d5-w5q2v" Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.732600 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-g62kx"] Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.733396 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-757b4f8459-g62kx" podUID="3d531c11-4aa9-4fe9-ba25-ef5b3a28438d" containerName="dnsmasq-dns" containerID="cri-o://7f0d6d3aca35764a9056552a0452ad8f81a227ff90bfdf6842ca1354b2691ef8" gracePeriod=10 Nov 22 07:53:41 crc kubenswrapper[4929]: I1122 07:53:41.737163 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84080ce2-3ac9-40cb-b5d4-2674cefbfea3-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.323946 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.384596 4929 generic.go:334] "Generic (PLEG): container finished" podID="3d531c11-4aa9-4fe9-ba25-ef5b3a28438d" containerID="7f0d6d3aca35764a9056552a0452ad8f81a227ff90bfdf6842ca1354b2691ef8" exitCode=0 Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.384671 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-g62kx" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.384719 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.384709 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-g62kx" event={"ID":"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d","Type":"ContainerDied","Data":"7f0d6d3aca35764a9056552a0452ad8f81a227ff90bfdf6842ca1354b2691ef8"} Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.384776 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-g62kx" event={"ID":"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d","Type":"ContainerDied","Data":"6519d63b6100543724625a8604aaace21f5443e57ef85b750c3367284888e5b4"} Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.384804 4929 scope.go:117] "RemoveContainer" containerID="7f0d6d3aca35764a9056552a0452ad8f81a227ff90bfdf6842ca1354b2691ef8" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.413598 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.415232 4929 scope.go:117] "RemoveContainer" containerID="f3cffe924fffabfe365fe6354af606a9603b1b7b1a927fe6241b97c47a56810b" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.425132 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.453024 4929 scope.go:117] "RemoveContainer" containerID="7f0d6d3aca35764a9056552a0452ad8f81a227ff90bfdf6842ca1354b2691ef8" Nov 22 07:53:42 crc kubenswrapper[4929]: E1122 07:53:42.453542 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f0d6d3aca35764a9056552a0452ad8f81a227ff90bfdf6842ca1354b2691ef8\": container 
with ID starting with 7f0d6d3aca35764a9056552a0452ad8f81a227ff90bfdf6842ca1354b2691ef8 not found: ID does not exist" containerID="7f0d6d3aca35764a9056552a0452ad8f81a227ff90bfdf6842ca1354b2691ef8" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.453580 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f0d6d3aca35764a9056552a0452ad8f81a227ff90bfdf6842ca1354b2691ef8"} err="failed to get container status \"7f0d6d3aca35764a9056552a0452ad8f81a227ff90bfdf6842ca1354b2691ef8\": rpc error: code = NotFound desc = could not find container \"7f0d6d3aca35764a9056552a0452ad8f81a227ff90bfdf6842ca1354b2691ef8\": container with ID starting with 7f0d6d3aca35764a9056552a0452ad8f81a227ff90bfdf6842ca1354b2691ef8 not found: ID does not exist" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.453600 4929 scope.go:117] "RemoveContainer" containerID="f3cffe924fffabfe365fe6354af606a9603b1b7b1a927fe6241b97c47a56810b" Nov 22 07:53:42 crc kubenswrapper[4929]: E1122 07:53:42.454051 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3cffe924fffabfe365fe6354af606a9603b1b7b1a927fe6241b97c47a56810b\": container with ID starting with f3cffe924fffabfe365fe6354af606a9603b1b7b1a927fe6241b97c47a56810b not found: ID does not exist" containerID="f3cffe924fffabfe365fe6354af606a9603b1b7b1a927fe6241b97c47a56810b" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.454073 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3cffe924fffabfe365fe6354af606a9603b1b7b1a927fe6241b97c47a56810b"} err="failed to get container status \"f3cffe924fffabfe365fe6354af606a9603b1b7b1a927fe6241b97c47a56810b\": rpc error: code = NotFound desc = could not find container \"f3cffe924fffabfe365fe6354af606a9603b1b7b1a927fe6241b97c47a56810b\": container with ID starting with f3cffe924fffabfe365fe6354af606a9603b1b7b1a927fe6241b97c47a56810b not found: ID does not exist" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.454744 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-ovsdbserver-nb\") pod \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.454866 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-config\") pod \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.454890 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-dns-swift-storage-0\") pod \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.454915 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-dns-svc\") pod \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.454966 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-ovsdbserver-sb\") pod \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.455011 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rqnck\" (UniqueName: \"kubernetes.io/projected/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-kube-api-access-rqnck\") pod \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\" (UID: \"3d531c11-4aa9-4fe9-ba25-ef5b3a28438d\") " Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.456263 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:53:42 crc kubenswrapper[4929]: E1122 07:53:42.456759 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerName="ceilometer-notification-agent" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.456781 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerName="ceilometer-notification-agent" Nov 22 07:53:42 crc kubenswrapper[4929]: E1122 07:53:42.456818 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerName="sg-core" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.456827 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerName="sg-core" Nov 22 07:53:42 crc kubenswrapper[4929]: E1122 07:53:42.456844 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerName="proxy-httpd" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.456851 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerName="proxy-httpd" Nov 22 07:53:42 crc kubenswrapper[4929]: E1122 07:53:42.456861 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d531c11-4aa9-4fe9-ba25-ef5b3a28438d" containerName="init" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.456867 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d531c11-4aa9-4fe9-ba25-ef5b3a28438d" containerName="init" Nov 22 07:53:42 crc kubenswrapper[4929]: E1122 07:53:42.456900 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d531c11-4aa9-4fe9-ba25-ef5b3a28438d" containerName="dnsmasq-dns" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.456907 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d531c11-4aa9-4fe9-ba25-ef5b3a28438d" containerName="dnsmasq-dns" Nov 22 07:53:42 crc kubenswrapper[4929]: E1122 07:53:42.456915 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerName="ceilometer-central-agent" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.456922 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerName="ceilometer-central-agent" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.457132 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerName="sg-core" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.457160 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerName="proxy-httpd" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 
07:53:42.457174 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerName="ceilometer-central-agent" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.457189 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" containerName="ceilometer-notification-agent" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.457198 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d531c11-4aa9-4fe9-ba25-ef5b3a28438d" containerName="dnsmasq-dns" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.459538 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.461780 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-kube-api-access-rqnck" (OuterVolumeSpecName: "kube-api-access-rqnck") pod "3d531c11-4aa9-4fe9-ba25-ef5b3a28438d" (UID: "3d531c11-4aa9-4fe9-ba25-ef5b3a28438d"). InnerVolumeSpecName "kube-api-access-rqnck". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.466640 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.469461 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.469680 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.482789 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.528893 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3d531c11-4aa9-4fe9-ba25-ef5b3a28438d" (UID: "3d531c11-4aa9-4fe9-ba25-ef5b3a28438d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.530328 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3d531c11-4aa9-4fe9-ba25-ef5b3a28438d" (UID: "3d531c11-4aa9-4fe9-ba25-ef5b3a28438d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.531374 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3d531c11-4aa9-4fe9-ba25-ef5b3a28438d" (UID: "3d531c11-4aa9-4fe9-ba25-ef5b3a28438d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.542660 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3d531c11-4aa9-4fe9-ba25-ef5b3a28438d" (UID: "3d531c11-4aa9-4fe9-ba25-ef5b3a28438d"). 
InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.545634 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-config" (OuterVolumeSpecName: "config") pod "3d531c11-4aa9-4fe9-ba25-ef5b3a28438d" (UID: "3d531c11-4aa9-4fe9-ba25-ef5b3a28438d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.557443 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca38dd37-7bd6-475b-917d-459f11c50877-config-data\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.557492 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5c8k\" (UniqueName: \"kubernetes.io/projected/ca38dd37-7bd6-475b-917d-459f11c50877-kube-api-access-l5c8k\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.557515 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca38dd37-7bd6-475b-917d-459f11c50877-scripts\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.557834 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca38dd37-7bd6-475b-917d-459f11c50877-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.557992 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca38dd37-7bd6-475b-917d-459f11c50877-log-httpd\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.558037 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ca38dd37-7bd6-475b-917d-459f11c50877-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.558097 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca38dd37-7bd6-475b-917d-459f11c50877-run-httpd\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.558139 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ca38dd37-7bd6-475b-917d-459f11c50877-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: 
I1122 07:53:42.558354 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.558381 4929 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.558397 4929 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.558408 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.558422 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rqnck\" (UniqueName: \"kubernetes.io/projected/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-kube-api-access-rqnck\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.558435 4929 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.660066 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5c8k\" (UniqueName: \"kubernetes.io/projected/ca38dd37-7bd6-475b-917d-459f11c50877-kube-api-access-l5c8k\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.660728 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca38dd37-7bd6-475b-917d-459f11c50877-config-data\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.660826 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca38dd37-7bd6-475b-917d-459f11c50877-scripts\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.660988 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca38dd37-7bd6-475b-917d-459f11c50877-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.661093 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca38dd37-7bd6-475b-917d-459f11c50877-log-httpd\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.661170 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/ca38dd37-7bd6-475b-917d-459f11c50877-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.661262 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca38dd37-7bd6-475b-917d-459f11c50877-run-httpd\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.661337 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ca38dd37-7bd6-475b-917d-459f11c50877-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.663018 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca38dd37-7bd6-475b-917d-459f11c50877-log-httpd\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.664057 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ca38dd37-7bd6-475b-917d-459f11c50877-run-httpd\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.665741 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ca38dd37-7bd6-475b-917d-459f11c50877-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.666782 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca38dd37-7bd6-475b-917d-459f11c50877-config-data\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.667384 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca38dd37-7bd6-475b-917d-459f11c50877-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.668285 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ca38dd37-7bd6-475b-917d-459f11c50877-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.668455 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ca38dd37-7bd6-475b-917d-459f11c50877-scripts\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.686504 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5c8k\" (UniqueName: 
\"kubernetes.io/projected/ca38dd37-7bd6-475b-917d-459f11c50877-kube-api-access-l5c8k\") pod \"ceilometer-0\" (UID: \"ca38dd37-7bd6-475b-917d-459f11c50877\") " pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.721757 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-g62kx"] Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.733935 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-g62kx"] Nov 22 07:53:42 crc kubenswrapper[4929]: I1122 07:53:42.879442 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 07:53:42 crc kubenswrapper[4929]: E1122 07:53:42.907126 4929 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d531c11_4aa9_4fe9_ba25_ef5b3a28438d.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d531c11_4aa9_4fe9_ba25_ef5b3a28438d.slice/crio-6519d63b6100543724625a8604aaace21f5443e57ef85b750c3367284888e5b4\": RecentStats: unable to find data in memory cache]" Nov 22 07:53:43 crc kubenswrapper[4929]: I1122 07:53:43.374903 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 07:53:43 crc kubenswrapper[4929]: I1122 07:53:43.399795 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca38dd37-7bd6-475b-917d-459f11c50877","Type":"ContainerStarted","Data":"821656b55712478c3503583d4c47a6f451bf24e5439e860d3cc93b6b9ac23fe7"} Nov 22 07:53:43 crc kubenswrapper[4929]: I1122 07:53:43.627795 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 07:53:43 crc kubenswrapper[4929]: I1122 07:53:43.630529 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 07:53:43 crc kubenswrapper[4929]: I1122 07:53:43.956956 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d531c11-4aa9-4fe9-ba25-ef5b3a28438d" path="/var/lib/kubelet/pods/3d531c11-4aa9-4fe9-ba25-ef5b3a28438d/volumes" Nov 22 07:53:43 crc kubenswrapper[4929]: I1122 07:53:43.957812 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84080ce2-3ac9-40cb-b5d4-2674cefbfea3" path="/var/lib/kubelet/pods/84080ce2-3ac9-40cb-b5d4-2674cefbfea3/volumes" Nov 22 07:53:44 crc kubenswrapper[4929]: I1122 07:53:44.641384 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3e1faa08-de2a-4f06-a9a6-65245d240c19" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.233:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 07:53:44 crc kubenswrapper[4929]: I1122 07:53:44.641384 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="3e1faa08-de2a-4f06-a9a6-65245d240c19" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.233:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 07:53:46 crc kubenswrapper[4929]: I1122 07:53:46.427180 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"ca38dd37-7bd6-475b-917d-459f11c50877","Type":"ContainerStarted","Data":"9acbb6c626b2a9bb71c05e2682209bd18c5979d5a9eca404fbae5b85a1ea764d"} Nov 22 07:53:47 crc kubenswrapper[4929]: I1122 07:53:47.082903 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-757b4f8459-g62kx" podUID="3d531c11-4aa9-4fe9-ba25-ef5b3a28438d" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.222:5353: i/o timeout" Nov 22 07:53:48 crc kubenswrapper[4929]: I1122 07:53:48.445774 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca38dd37-7bd6-475b-917d-459f11c50877","Type":"ContainerStarted","Data":"179a712e1523f0cf81b3a9d4158516dd34a24ad239a080e12bcd70488b977533"} Nov 22 07:53:49 crc kubenswrapper[4929]: I1122 07:53:49.457473 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca38dd37-7bd6-475b-917d-459f11c50877","Type":"ContainerStarted","Data":"c8fc22a9a5937bf8405938b26186a5d83c95028d1c59f849da0e709a16c427b5"} Nov 22 07:53:52 crc kubenswrapper[4929]: I1122 07:53:52.046760 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:53:53 crc kubenswrapper[4929]: I1122 07:53:53.490099 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ca38dd37-7bd6-475b-917d-459f11c50877","Type":"ContainerStarted","Data":"f571f04c436bc6a82be58ab69d8a5e2208d1f681a2d78a7c4c195f969cfb9145"} Nov 22 07:53:53 crc kubenswrapper[4929]: I1122 07:53:53.490627 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 07:53:53 crc kubenswrapper[4929]: I1122 07:53:53.520499 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.01114857 podStartE2EDuration="11.520477177s" podCreationTimestamp="2025-11-22 07:53:42 +0000 UTC" firstStartedPulling="2025-11-22 07:53:43.383061757 +0000 UTC m=+2560.492515760" lastFinishedPulling="2025-11-22 07:53:51.892390354 +0000 UTC m=+2569.001844367" observedRunningTime="2025-11-22 07:53:53.518015736 +0000 UTC m=+2570.627469749" watchObservedRunningTime="2025-11-22 07:53:53.520477177 +0000 UTC m=+2570.629931190" Nov 22 07:53:53 crc kubenswrapper[4929]: I1122 07:53:53.641016 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 22 07:53:53 crc kubenswrapper[4929]: I1122 07:53:53.642632 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 22 07:53:53 crc kubenswrapper[4929]: I1122 07:53:53.647938 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 22 07:53:53 crc kubenswrapper[4929]: I1122 07:53:53.651237 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 22 07:53:54 crc kubenswrapper[4929]: I1122 07:53:54.505796 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 22 07:53:54 crc kubenswrapper[4929]: I1122 07:53:54.513539 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 22 07:53:54 crc kubenswrapper[4929]: I1122 07:53:54.948350 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" Nov 22 07:53:54 crc kubenswrapper[4929]: E1122 07:53:54.948855 4929 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:53:55 crc kubenswrapper[4929]: I1122 07:53:55.162169 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-6d5d9c9567-b7l75" Nov 22 07:53:55 crc kubenswrapper[4929]: I1122 07:53:55.226166 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-8677c466cb-74g99"] Nov 22 07:53:55 crc kubenswrapper[4929]: I1122 07:53:55.226977 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-8677c466cb-74g99" podUID="864b28ed-cf71-451f-8af0-61d616497ee7" containerName="neutron-httpd" containerID="cri-o://369b9e5a502dad2f024489718d4987ad29e9703768bee8f75fff35b638d9493a" gracePeriod=30 Nov 22 07:53:55 crc kubenswrapper[4929]: I1122 07:53:55.227169 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-8677c466cb-74g99" podUID="864b28ed-cf71-451f-8af0-61d616497ee7" containerName="neutron-api" containerID="cri-o://1254bb0deb6c2be230ff9eadab2ede58d0d2cce8ebf813d10e64d1a0aa3115f4" gracePeriod=30 Nov 22 07:53:55 crc kubenswrapper[4929]: I1122 07:53:55.515606 4929 generic.go:334] "Generic (PLEG): container finished" podID="864b28ed-cf71-451f-8af0-61d616497ee7" containerID="369b9e5a502dad2f024489718d4987ad29e9703768bee8f75fff35b638d9493a" exitCode=0 Nov 22 07:53:55 crc kubenswrapper[4929]: I1122 07:53:55.515689 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8677c466cb-74g99" event={"ID":"864b28ed-cf71-451f-8af0-61d616497ee7","Type":"ContainerDied","Data":"369b9e5a502dad2f024489718d4987ad29e9703768bee8f75fff35b638d9493a"} Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.078368 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.256175 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-ovndb-tls-certs\") pod \"864b28ed-cf71-451f-8af0-61d616497ee7\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.256265 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-combined-ca-bundle\") pod \"864b28ed-cf71-451f-8af0-61d616497ee7\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.256357 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-config\") pod \"864b28ed-cf71-451f-8af0-61d616497ee7\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.256454 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pmk47\" (UniqueName: \"kubernetes.io/projected/864b28ed-cf71-451f-8af0-61d616497ee7-kube-api-access-pmk47\") pod \"864b28ed-cf71-451f-8af0-61d616497ee7\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.256628 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-httpd-config\") pod \"864b28ed-cf71-451f-8af0-61d616497ee7\" (UID: \"864b28ed-cf71-451f-8af0-61d616497ee7\") " Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.265156 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "864b28ed-cf71-451f-8af0-61d616497ee7" (UID: "864b28ed-cf71-451f-8af0-61d616497ee7"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.265396 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/864b28ed-cf71-451f-8af0-61d616497ee7-kube-api-access-pmk47" (OuterVolumeSpecName: "kube-api-access-pmk47") pod "864b28ed-cf71-451f-8af0-61d616497ee7" (UID: "864b28ed-cf71-451f-8af0-61d616497ee7"). InnerVolumeSpecName "kube-api-access-pmk47". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.332943 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "864b28ed-cf71-451f-8af0-61d616497ee7" (UID: "864b28ed-cf71-451f-8af0-61d616497ee7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.359759 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.359794 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pmk47\" (UniqueName: \"kubernetes.io/projected/864b28ed-cf71-451f-8af0-61d616497ee7-kube-api-access-pmk47\") on node \"crc\" DevicePath \"\"" Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.359811 4929 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.380823 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-config" (OuterVolumeSpecName: "config") pod "864b28ed-cf71-451f-8af0-61d616497ee7" (UID: "864b28ed-cf71-451f-8af0-61d616497ee7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.380863 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "864b28ed-cf71-451f-8af0-61d616497ee7" (UID: "864b28ed-cf71-451f-8af0-61d616497ee7"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.461449 4929 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-config\") on node \"crc\" DevicePath \"\"" Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.461485 4929 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/864b28ed-cf71-451f-8af0-61d616497ee7-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.608394 4929 generic.go:334] "Generic (PLEG): container finished" podID="864b28ed-cf71-451f-8af0-61d616497ee7" containerID="1254bb0deb6c2be230ff9eadab2ede58d0d2cce8ebf813d10e64d1a0aa3115f4" exitCode=0 Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.608438 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8677c466cb-74g99" event={"ID":"864b28ed-cf71-451f-8af0-61d616497ee7","Type":"ContainerDied","Data":"1254bb0deb6c2be230ff9eadab2ede58d0d2cce8ebf813d10e64d1a0aa3115f4"} Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.608487 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-8677c466cb-74g99" event={"ID":"864b28ed-cf71-451f-8af0-61d616497ee7","Type":"ContainerDied","Data":"abd8e5f160c887e05426580237bfcbbdee0b52e5651f1d42f7a1e73edab8f2a0"} Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.608505 4929 scope.go:117] "RemoveContainer" containerID="369b9e5a502dad2f024489718d4987ad29e9703768bee8f75fff35b638d9493a" Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.608642 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-8677c466cb-74g99" Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.675996 4929 scope.go:117] "RemoveContainer" containerID="1254bb0deb6c2be230ff9eadab2ede58d0d2cce8ebf813d10e64d1a0aa3115f4" Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.690552 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-8677c466cb-74g99"] Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.695105 4929 scope.go:117] "RemoveContainer" containerID="369b9e5a502dad2f024489718d4987ad29e9703768bee8f75fff35b638d9493a" Nov 22 07:54:00 crc kubenswrapper[4929]: E1122 07:54:00.695621 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"369b9e5a502dad2f024489718d4987ad29e9703768bee8f75fff35b638d9493a\": container with ID starting with 369b9e5a502dad2f024489718d4987ad29e9703768bee8f75fff35b638d9493a not found: ID does not exist" containerID="369b9e5a502dad2f024489718d4987ad29e9703768bee8f75fff35b638d9493a" Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.695662 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"369b9e5a502dad2f024489718d4987ad29e9703768bee8f75fff35b638d9493a"} err="failed to get container status \"369b9e5a502dad2f024489718d4987ad29e9703768bee8f75fff35b638d9493a\": rpc error: code = NotFound desc = could not find container \"369b9e5a502dad2f024489718d4987ad29e9703768bee8f75fff35b638d9493a\": container with ID starting with 369b9e5a502dad2f024489718d4987ad29e9703768bee8f75fff35b638d9493a not found: ID does not exist" Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.695694 4929 scope.go:117] "RemoveContainer" containerID="1254bb0deb6c2be230ff9eadab2ede58d0d2cce8ebf813d10e64d1a0aa3115f4" Nov 22 07:54:00 crc kubenswrapper[4929]: E1122 07:54:00.696022 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1254bb0deb6c2be230ff9eadab2ede58d0d2cce8ebf813d10e64d1a0aa3115f4\": container with ID starting with 1254bb0deb6c2be230ff9eadab2ede58d0d2cce8ebf813d10e64d1a0aa3115f4 not found: ID does not exist" containerID="1254bb0deb6c2be230ff9eadab2ede58d0d2cce8ebf813d10e64d1a0aa3115f4" Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.696057 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1254bb0deb6c2be230ff9eadab2ede58d0d2cce8ebf813d10e64d1a0aa3115f4"} err="failed to get container status \"1254bb0deb6c2be230ff9eadab2ede58d0d2cce8ebf813d10e64d1a0aa3115f4\": rpc error: code = NotFound desc = could not find container \"1254bb0deb6c2be230ff9eadab2ede58d0d2cce8ebf813d10e64d1a0aa3115f4\": container with ID starting with 1254bb0deb6c2be230ff9eadab2ede58d0d2cce8ebf813d10e64d1a0aa3115f4 not found: ID does not exist" Nov 22 07:54:00 crc kubenswrapper[4929]: I1122 07:54:00.698298 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-8677c466cb-74g99"] Nov 22 07:54:01 crc kubenswrapper[4929]: I1122 07:54:01.047402 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-r9kl7"] Nov 22 07:54:01 crc kubenswrapper[4929]: I1122 07:54:01.055690 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-r9kl7"] Nov 22 07:54:01 crc kubenswrapper[4929]: I1122 07:54:01.960381 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="864b28ed-cf71-451f-8af0-61d616497ee7" 
path="/var/lib/kubelet/pods/864b28ed-cf71-451f-8af0-61d616497ee7/volumes" Nov 22 07:54:01 crc kubenswrapper[4929]: I1122 07:54:01.961349 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b387b4f1-8159-4237-9e71-5235ffc8ac5a" path="/var/lib/kubelet/pods/b387b4f1-8159-4237-9e71-5235ffc8ac5a/volumes" Nov 22 07:54:09 crc kubenswrapper[4929]: I1122 07:54:09.947990 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" Nov 22 07:54:09 crc kubenswrapper[4929]: E1122 07:54:09.949727 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:54:11 crc kubenswrapper[4929]: I1122 07:54:11.160973 4929 scope.go:117] "RemoveContainer" containerID="b3b3bcd0576f944531b8e744b5ec5673fed69095061d999c4072383acda50cc0" Nov 22 07:54:12 crc kubenswrapper[4929]: I1122 07:54:12.892679 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 22 07:54:24 crc kubenswrapper[4929]: I1122 07:54:24.948057 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" Nov 22 07:54:24 crc kubenswrapper[4929]: E1122 07:54:24.949055 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:54:36 crc kubenswrapper[4929]: I1122 07:54:36.947508 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" Nov 22 07:54:36 crc kubenswrapper[4929]: E1122 07:54:36.948324 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:54:50 crc kubenswrapper[4929]: I1122 07:54:50.947584 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" Nov 22 07:54:50 crc kubenswrapper[4929]: E1122 07:54:50.948781 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:55:01 crc kubenswrapper[4929]: I1122 07:55:01.947187 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" Nov 22 07:55:01 
crc kubenswrapper[4929]: E1122 07:55:01.947933 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:55:06 crc kubenswrapper[4929]: I1122 07:55:06.383137 4929 generic.go:334] "Generic (PLEG): container finished" podID="276eb7e8-f411-4d79-8ca1-0a7d55fa83ff" containerID="3f06f4053b60d4e141beeb4898f5c10ca17684c7370470eab5853c918287e253" exitCode=0 Nov 22 07:55:06 crc kubenswrapper[4929]: I1122 07:55:06.383252 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-tf9kg" event={"ID":"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff","Type":"ContainerDied","Data":"3f06f4053b60d4e141beeb4898f5c10ca17684c7370470eab5853c918287e253"} Nov 22 07:55:07 crc kubenswrapper[4929]: I1122 07:55:07.711818 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-tf9kg" Nov 22 07:55:07 crc kubenswrapper[4929]: I1122 07:55:07.783055 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wl5jb\" (UniqueName: \"kubernetes.io/projected/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-kube-api-access-wl5jb\") pod \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\" (UID: \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\") " Nov 22 07:55:07 crc kubenswrapper[4929]: I1122 07:55:07.783194 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-combined-ca-bundle\") pod \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\" (UID: \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\") " Nov 22 07:55:07 crc kubenswrapper[4929]: I1122 07:55:07.783324 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-scripts\") pod \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\" (UID: \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\") " Nov 22 07:55:07 crc kubenswrapper[4929]: I1122 07:55:07.784292 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-config-data\") pod \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\" (UID: \"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff\") " Nov 22 07:55:07 crc kubenswrapper[4929]: I1122 07:55:07.788523 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-scripts" (OuterVolumeSpecName: "scripts") pod "276eb7e8-f411-4d79-8ca1-0a7d55fa83ff" (UID: "276eb7e8-f411-4d79-8ca1-0a7d55fa83ff"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:55:07 crc kubenswrapper[4929]: I1122 07:55:07.788890 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-kube-api-access-wl5jb" (OuterVolumeSpecName: "kube-api-access-wl5jb") pod "276eb7e8-f411-4d79-8ca1-0a7d55fa83ff" (UID: "276eb7e8-f411-4d79-8ca1-0a7d55fa83ff"). InnerVolumeSpecName "kube-api-access-wl5jb". 
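The recurring CrashLoopBackOff errors for machine-config-daemon-dssfx show a restart backoff already pinned at its 5m0s cap; each sync attempt merely re-logs the standing delay. Below is a minimal sketch of the doubling backoff that produces "back-off 5m0s", assuming kubelet's commonly documented defaults of a 10s initial delay and a 300s ceiling; restartDelay is our name, not kubelet's.

package main

import (
	"fmt"
	"time"
)

// restartDelay returns the backoff before restart attempt n (0-based),
// doubling from an initial delay and saturating at maxDelay.
func restartDelay(n int, initial, maxDelay time.Duration) time.Duration {
	d := initial
	for i := 0; i < n; i++ {
		d *= 2
		if d >= maxDelay {
			return maxDelay
		}
	}
	return d
}

func main() {
	for n := 0; n < 8; n++ {
		fmt.Printf("attempt %d: wait %v\n", n, restartDelay(n, 10*time.Second, 5*time.Minute))
	}
	// From attempt 5 on this prints 5m0s -- the standing "back-off 5m0s"
	// that the log repeats for the crash-looping container.
}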
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:55:07 crc kubenswrapper[4929]: I1122 07:55:07.812052 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-config-data" (OuterVolumeSpecName: "config-data") pod "276eb7e8-f411-4d79-8ca1-0a7d55fa83ff" (UID: "276eb7e8-f411-4d79-8ca1-0a7d55fa83ff"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:55:07 crc kubenswrapper[4929]: I1122 07:55:07.812887 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "276eb7e8-f411-4d79-8ca1-0a7d55fa83ff" (UID: "276eb7e8-f411-4d79-8ca1-0a7d55fa83ff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:55:07 crc kubenswrapper[4929]: I1122 07:55:07.886932 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:07 crc kubenswrapper[4929]: I1122 07:55:07.886981 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:07 crc kubenswrapper[4929]: I1122 07:55:07.886993 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wl5jb\" (UniqueName: \"kubernetes.io/projected/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-kube-api-access-wl5jb\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:07 crc kubenswrapper[4929]: I1122 07:55:07.887005 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.407324 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-tf9kg" event={"ID":"276eb7e8-f411-4d79-8ca1-0a7d55fa83ff","Type":"ContainerDied","Data":"7be002588d7b43a9936513af2609b0249fd8948ccf523eb63d09f1f7cadc71ed"} Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.407358 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-tf9kg" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.407360 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7be002588d7b43a9936513af2609b0249fd8948ccf523eb63d09f1f7cadc71ed" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.492466 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 22 07:55:08 crc kubenswrapper[4929]: E1122 07:55:08.492981 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="276eb7e8-f411-4d79-8ca1-0a7d55fa83ff" containerName="nova-cell1-conductor-db-sync" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.493000 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="276eb7e8-f411-4d79-8ca1-0a7d55fa83ff" containerName="nova-cell1-conductor-db-sync" Nov 22 07:55:08 crc kubenswrapper[4929]: E1122 07:55:08.493028 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="864b28ed-cf71-451f-8af0-61d616497ee7" containerName="neutron-api" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.493036 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="864b28ed-cf71-451f-8af0-61d616497ee7" containerName="neutron-api" Nov 22 07:55:08 crc kubenswrapper[4929]: E1122 07:55:08.493056 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="864b28ed-cf71-451f-8af0-61d616497ee7" containerName="neutron-httpd" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.493064 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="864b28ed-cf71-451f-8af0-61d616497ee7" containerName="neutron-httpd" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.493303 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="276eb7e8-f411-4d79-8ca1-0a7d55fa83ff" containerName="nova-cell1-conductor-db-sync" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.493339 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="864b28ed-cf71-451f-8af0-61d616497ee7" containerName="neutron-httpd" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.493357 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="864b28ed-cf71-451f-8af0-61d616497ee7" containerName="neutron-api" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.494154 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.497556 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.509787 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.598878 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjrsn\" (UniqueName: \"kubernetes.io/projected/58b3b346-fb7a-4957-b9e3-eadbfb64deb6-kube-api-access-zjrsn\") pod \"nova-cell1-conductor-0\" (UID: \"58b3b346-fb7a-4957-b9e3-eadbfb64deb6\") " pod="openstack/nova-cell1-conductor-0" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.598952 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58b3b346-fb7a-4957-b9e3-eadbfb64deb6-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"58b3b346-fb7a-4957-b9e3-eadbfb64deb6\") " pod="openstack/nova-cell1-conductor-0" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.599263 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58b3b346-fb7a-4957-b9e3-eadbfb64deb6-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"58b3b346-fb7a-4957-b9e3-eadbfb64deb6\") " pod="openstack/nova-cell1-conductor-0" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.701109 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58b3b346-fb7a-4957-b9e3-eadbfb64deb6-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"58b3b346-fb7a-4957-b9e3-eadbfb64deb6\") " pod="openstack/nova-cell1-conductor-0" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.701374 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58b3b346-fb7a-4957-b9e3-eadbfb64deb6-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"58b3b346-fb7a-4957-b9e3-eadbfb64deb6\") " pod="openstack/nova-cell1-conductor-0" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.701627 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjrsn\" (UniqueName: \"kubernetes.io/projected/58b3b346-fb7a-4957-b9e3-eadbfb64deb6-kube-api-access-zjrsn\") pod \"nova-cell1-conductor-0\" (UID: \"58b3b346-fb7a-4957-b9e3-eadbfb64deb6\") " pod="openstack/nova-cell1-conductor-0" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.706810 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58b3b346-fb7a-4957-b9e3-eadbfb64deb6-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"58b3b346-fb7a-4957-b9e3-eadbfb64deb6\") " pod="openstack/nova-cell1-conductor-0" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.710011 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58b3b346-fb7a-4957-b9e3-eadbfb64deb6-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"58b3b346-fb7a-4957-b9e3-eadbfb64deb6\") " pod="openstack/nova-cell1-conductor-0" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.718580 4929 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjrsn\" (UniqueName: \"kubernetes.io/projected/58b3b346-fb7a-4957-b9e3-eadbfb64deb6-kube-api-access-zjrsn\") pod \"nova-cell1-conductor-0\" (UID: \"58b3b346-fb7a-4957-b9e3-eadbfb64deb6\") " pod="openstack/nova-cell1-conductor-0" Nov 22 07:55:08 crc kubenswrapper[4929]: I1122 07:55:08.813854 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 22 07:55:09 crc kubenswrapper[4929]: I1122 07:55:09.230482 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 22 07:55:09 crc kubenswrapper[4929]: I1122 07:55:09.424290 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"58b3b346-fb7a-4957-b9e3-eadbfb64deb6","Type":"ContainerStarted","Data":"a42951469ae18d2a97defe82f1e256b1e5ed127a12513e96a8536ce5cd66dd1a"} Nov 22 07:55:09 crc kubenswrapper[4929]: I1122 07:55:09.424342 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"58b3b346-fb7a-4957-b9e3-eadbfb64deb6","Type":"ContainerStarted","Data":"504130eae60056535446d64400624b4bb3bd6cd906459c7981611c793d0844ee"} Nov 22 07:55:09 crc kubenswrapper[4929]: I1122 07:55:09.424372 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 22 07:55:11 crc kubenswrapper[4929]: I1122 07:55:11.548308 4929 scope.go:117] "RemoveContainer" containerID="b4482b450cf2f85ec75e4bb17c90708a8e4aa915651ddef297737c543c3c717a" Nov 22 07:55:11 crc kubenswrapper[4929]: I1122 07:55:11.583385 4929 scope.go:117] "RemoveContainer" containerID="6e0180514f75f36d512ef82c11723603a0b065dfa813eaedc7ccc5f2491902df" Nov 22 07:55:12 crc kubenswrapper[4929]: I1122 07:55:12.947185 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" Nov 22 07:55:12 crc kubenswrapper[4929]: E1122 07:55:12.947687 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:55:18 crc kubenswrapper[4929]: I1122 07:55:18.844132 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 22 07:55:18 crc kubenswrapper[4929]: I1122 07:55:18.870470 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=10.870442556 podStartE2EDuration="10.870442556s" podCreationTimestamp="2025-11-22 07:55:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:55:09.43847266 +0000 UTC m=+2646.547926673" watchObservedRunningTime="2025-11-22 07:55:18.870442556 +0000 UTC m=+2655.979896579" Nov 22 07:55:19 crc kubenswrapper[4929]: I1122 07:55:19.933894 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-x8qjf"] Nov 22 07:55:19 crc kubenswrapper[4929]: I1122 07:55:19.935137 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-x8qjf" Nov 22 07:55:19 crc kubenswrapper[4929]: I1122 07:55:19.938074 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 22 07:55:19 crc kubenswrapper[4929]: I1122 07:55:19.938267 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 22 07:55:19 crc kubenswrapper[4929]: I1122 07:55:19.975778 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-x8qjf"] Nov 22 07:55:20 crc kubenswrapper[4929]: I1122 07:55:20.038594 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5cl4\" (UniqueName: \"kubernetes.io/projected/861208ae-0e23-4cab-9ac4-b08cade2fead-kube-api-access-m5cl4\") pod \"nova-cell1-cell-mapping-x8qjf\" (UID: \"861208ae-0e23-4cab-9ac4-b08cade2fead\") " pod="openstack/nova-cell1-cell-mapping-x8qjf" Nov 22 07:55:20 crc kubenswrapper[4929]: I1122 07:55:20.038650 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/861208ae-0e23-4cab-9ac4-b08cade2fead-config-data\") pod \"nova-cell1-cell-mapping-x8qjf\" (UID: \"861208ae-0e23-4cab-9ac4-b08cade2fead\") " pod="openstack/nova-cell1-cell-mapping-x8qjf" Nov 22 07:55:20 crc kubenswrapper[4929]: I1122 07:55:20.038984 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/861208ae-0e23-4cab-9ac4-b08cade2fead-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-x8qjf\" (UID: \"861208ae-0e23-4cab-9ac4-b08cade2fead\") " pod="openstack/nova-cell1-cell-mapping-x8qjf" Nov 22 07:55:20 crc kubenswrapper[4929]: I1122 07:55:20.039107 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/861208ae-0e23-4cab-9ac4-b08cade2fead-scripts\") pod \"nova-cell1-cell-mapping-x8qjf\" (UID: \"861208ae-0e23-4cab-9ac4-b08cade2fead\") " pod="openstack/nova-cell1-cell-mapping-x8qjf" Nov 22 07:55:20 crc kubenswrapper[4929]: I1122 07:55:20.141538 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/861208ae-0e23-4cab-9ac4-b08cade2fead-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-x8qjf\" (UID: \"861208ae-0e23-4cab-9ac4-b08cade2fead\") " pod="openstack/nova-cell1-cell-mapping-x8qjf" Nov 22 07:55:20 crc kubenswrapper[4929]: I1122 07:55:20.141864 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/861208ae-0e23-4cab-9ac4-b08cade2fead-scripts\") pod \"nova-cell1-cell-mapping-x8qjf\" (UID: \"861208ae-0e23-4cab-9ac4-b08cade2fead\") " pod="openstack/nova-cell1-cell-mapping-x8qjf" Nov 22 07:55:20 crc kubenswrapper[4929]: I1122 07:55:20.141991 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5cl4\" (UniqueName: \"kubernetes.io/projected/861208ae-0e23-4cab-9ac4-b08cade2fead-kube-api-access-m5cl4\") pod \"nova-cell1-cell-mapping-x8qjf\" (UID: \"861208ae-0e23-4cab-9ac4-b08cade2fead\") " pod="openstack/nova-cell1-cell-mapping-x8qjf" Nov 22 07:55:20 crc kubenswrapper[4929]: I1122 07:55:20.142019 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/861208ae-0e23-4cab-9ac4-b08cade2fead-config-data\") pod \"nova-cell1-cell-mapping-x8qjf\" (UID: \"861208ae-0e23-4cab-9ac4-b08cade2fead\") " pod="openstack/nova-cell1-cell-mapping-x8qjf" Nov 22 07:55:20 crc kubenswrapper[4929]: I1122 07:55:20.152810 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/861208ae-0e23-4cab-9ac4-b08cade2fead-scripts\") pod \"nova-cell1-cell-mapping-x8qjf\" (UID: \"861208ae-0e23-4cab-9ac4-b08cade2fead\") " pod="openstack/nova-cell1-cell-mapping-x8qjf" Nov 22 07:55:20 crc kubenswrapper[4929]: I1122 07:55:20.153902 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/861208ae-0e23-4cab-9ac4-b08cade2fead-config-data\") pod \"nova-cell1-cell-mapping-x8qjf\" (UID: \"861208ae-0e23-4cab-9ac4-b08cade2fead\") " pod="openstack/nova-cell1-cell-mapping-x8qjf" Nov 22 07:55:20 crc kubenswrapper[4929]: I1122 07:55:20.165944 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/861208ae-0e23-4cab-9ac4-b08cade2fead-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-x8qjf\" (UID: \"861208ae-0e23-4cab-9ac4-b08cade2fead\") " pod="openstack/nova-cell1-cell-mapping-x8qjf" Nov 22 07:55:20 crc kubenswrapper[4929]: I1122 07:55:20.166885 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5cl4\" (UniqueName: \"kubernetes.io/projected/861208ae-0e23-4cab-9ac4-b08cade2fead-kube-api-access-m5cl4\") pod \"nova-cell1-cell-mapping-x8qjf\" (UID: \"861208ae-0e23-4cab-9ac4-b08cade2fead\") " pod="openstack/nova-cell1-cell-mapping-x8qjf" Nov 22 07:55:20 crc kubenswrapper[4929]: I1122 07:55:20.266529 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-x8qjf" Nov 22 07:55:20 crc kubenswrapper[4929]: I1122 07:55:20.806392 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-x8qjf"] Nov 22 07:55:21 crc kubenswrapper[4929]: I1122 07:55:21.585539 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-x8qjf" event={"ID":"861208ae-0e23-4cab-9ac4-b08cade2fead","Type":"ContainerStarted","Data":"5d004db3ce6a052db7901def25365e428f0ce7cabacf8a57a1b2750cbc41bb80"} Nov 22 07:55:21 crc kubenswrapper[4929]: I1122 07:55:21.585864 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-x8qjf" event={"ID":"861208ae-0e23-4cab-9ac4-b08cade2fead","Type":"ContainerStarted","Data":"a5291100aca6fc08b683e8a97a0bd2ae7f7db1353f9f32328d8cfcfea351cb0b"} Nov 22 07:55:21 crc kubenswrapper[4929]: I1122 07:55:21.607358 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-x8qjf" podStartSLOduration=2.607339491 podStartE2EDuration="2.607339491s" podCreationTimestamp="2025-11-22 07:55:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:55:21.60649002 +0000 UTC m=+2658.715944043" watchObservedRunningTime="2025-11-22 07:55:21.607339491 +0000 UTC m=+2658.716793504" Nov 22 07:55:25 crc kubenswrapper[4929]: I1122 07:55:25.947401 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" Nov 22 07:55:25 crc kubenswrapper[4929]: E1122 07:55:25.949482 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 07:55:26 crc kubenswrapper[4929]: I1122 07:55:26.637930 4929 generic.go:334] "Generic (PLEG): container finished" podID="861208ae-0e23-4cab-9ac4-b08cade2fead" containerID="5d004db3ce6a052db7901def25365e428f0ce7cabacf8a57a1b2750cbc41bb80" exitCode=0 Nov 22 07:55:26 crc kubenswrapper[4929]: I1122 07:55:26.637989 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-x8qjf" event={"ID":"861208ae-0e23-4cab-9ac4-b08cade2fead","Type":"ContainerDied","Data":"5d004db3ce6a052db7901def25365e428f0ce7cabacf8a57a1b2750cbc41bb80"} Nov 22 07:55:27 crc kubenswrapper[4929]: I1122 07:55:27.979825 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-x8qjf" Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.120711 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/861208ae-0e23-4cab-9ac4-b08cade2fead-scripts\") pod \"861208ae-0e23-4cab-9ac4-b08cade2fead\" (UID: \"861208ae-0e23-4cab-9ac4-b08cade2fead\") " Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.120921 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5cl4\" (UniqueName: \"kubernetes.io/projected/861208ae-0e23-4cab-9ac4-b08cade2fead-kube-api-access-m5cl4\") pod \"861208ae-0e23-4cab-9ac4-b08cade2fead\" (UID: \"861208ae-0e23-4cab-9ac4-b08cade2fead\") " Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.121004 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/861208ae-0e23-4cab-9ac4-b08cade2fead-config-data\") pod \"861208ae-0e23-4cab-9ac4-b08cade2fead\" (UID: \"861208ae-0e23-4cab-9ac4-b08cade2fead\") " Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.121056 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/861208ae-0e23-4cab-9ac4-b08cade2fead-combined-ca-bundle\") pod \"861208ae-0e23-4cab-9ac4-b08cade2fead\" (UID: \"861208ae-0e23-4cab-9ac4-b08cade2fead\") " Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.126549 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/861208ae-0e23-4cab-9ac4-b08cade2fead-scripts" (OuterVolumeSpecName: "scripts") pod "861208ae-0e23-4cab-9ac4-b08cade2fead" (UID: "861208ae-0e23-4cab-9ac4-b08cade2fead"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.126947 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/861208ae-0e23-4cab-9ac4-b08cade2fead-kube-api-access-m5cl4" (OuterVolumeSpecName: "kube-api-access-m5cl4") pod "861208ae-0e23-4cab-9ac4-b08cade2fead" (UID: "861208ae-0e23-4cab-9ac4-b08cade2fead"). InnerVolumeSpecName "kube-api-access-m5cl4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.151749 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/861208ae-0e23-4cab-9ac4-b08cade2fead-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "861208ae-0e23-4cab-9ac4-b08cade2fead" (UID: "861208ae-0e23-4cab-9ac4-b08cade2fead"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.155937 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/861208ae-0e23-4cab-9ac4-b08cade2fead-config-data" (OuterVolumeSpecName: "config-data") pod "861208ae-0e23-4cab-9ac4-b08cade2fead" (UID: "861208ae-0e23-4cab-9ac4-b08cade2fead"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.224248 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m5cl4\" (UniqueName: \"kubernetes.io/projected/861208ae-0e23-4cab-9ac4-b08cade2fead-kube-api-access-m5cl4\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.224292 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/861208ae-0e23-4cab-9ac4-b08cade2fead-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.224306 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/861208ae-0e23-4cab-9ac4-b08cade2fead-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.224317 4929 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/861208ae-0e23-4cab-9ac4-b08cade2fead-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.661135 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-x8qjf" event={"ID":"861208ae-0e23-4cab-9ac4-b08cade2fead","Type":"ContainerDied","Data":"a5291100aca6fc08b683e8a97a0bd2ae7f7db1353f9f32328d8cfcfea351cb0b"} Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.661180 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a5291100aca6fc08b683e8a97a0bd2ae7f7db1353f9f32328d8cfcfea351cb0b" Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.661206 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-x8qjf" Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.893949 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.894476 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3e1faa08-de2a-4f06-a9a6-65245d240c19" containerName="nova-api-log" containerID="cri-o://98b105b96ea235fdbd4187e5f2c6dc967d9791bc17e200e03c5188e56e832202" gracePeriod=30 Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.894911 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3e1faa08-de2a-4f06-a9a6-65245d240c19" containerName="nova-api-api" containerID="cri-o://53f66bc6bd9d41dc496eeb0f4707d32dd1234f8d504b7dc161063c856bd0bd6a" gracePeriod=30 Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.914689 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.914903 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="4a828f0a-c50e-4f76-829b-543df4ac95e9" containerName="nova-scheduler-scheduler" containerID="cri-o://9cd1234296209169dea2e499b5dcca70426984e72e9c51b866640f12ad404f5d" gracePeriod=30 Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.939655 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.940008 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="573b60bf-1459-4e7f-9c71-f453e0d01074" 
containerName="nova-metadata-log" containerID="cri-o://9cc1a9f10ad5a4e6b7ace51ecf6ed2bd633bf22967c135c41dc4aaa5de6301fd" gracePeriod=30 Nov 22 07:55:28 crc kubenswrapper[4929]: I1122 07:55:28.940707 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="573b60bf-1459-4e7f-9c71-f453e0d01074" containerName="nova-metadata-metadata" containerID="cri-o://194f960d8a64566c0a5ace7ef58baa60f2f8d9cb1d0ba51ee8c085477085ee43" gracePeriod=30 Nov 22 07:55:29 crc kubenswrapper[4929]: I1122 07:55:29.671825 4929 generic.go:334] "Generic (PLEG): container finished" podID="573b60bf-1459-4e7f-9c71-f453e0d01074" containerID="9cc1a9f10ad5a4e6b7ace51ecf6ed2bd633bf22967c135c41dc4aaa5de6301fd" exitCode=143 Nov 22 07:55:29 crc kubenswrapper[4929]: I1122 07:55:29.671897 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"573b60bf-1459-4e7f-9c71-f453e0d01074","Type":"ContainerDied","Data":"9cc1a9f10ad5a4e6b7ace51ecf6ed2bd633bf22967c135c41dc4aaa5de6301fd"} Nov 22 07:55:29 crc kubenswrapper[4929]: I1122 07:55:29.673952 4929 generic.go:334] "Generic (PLEG): container finished" podID="3e1faa08-de2a-4f06-a9a6-65245d240c19" containerID="98b105b96ea235fdbd4187e5f2c6dc967d9791bc17e200e03c5188e56e832202" exitCode=143 Nov 22 07:55:29 crc kubenswrapper[4929]: I1122 07:55:29.673979 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3e1faa08-de2a-4f06-a9a6-65245d240c19","Type":"ContainerDied","Data":"98b105b96ea235fdbd4187e5f2c6dc967d9791bc17e200e03c5188e56e832202"} Nov 22 07:55:30 crc kubenswrapper[4929]: E1122 07:55:30.241311 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9cd1234296209169dea2e499b5dcca70426984e72e9c51b866640f12ad404f5d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 22 07:55:30 crc kubenswrapper[4929]: E1122 07:55:30.244081 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9cd1234296209169dea2e499b5dcca70426984e72e9c51b866640f12ad404f5d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 22 07:55:30 crc kubenswrapper[4929]: E1122 07:55:30.245159 4929 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9cd1234296209169dea2e499b5dcca70426984e72e9c51b866640f12ad404f5d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 22 07:55:30 crc kubenswrapper[4929]: E1122 07:55:30.245204 4929 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="4a828f0a-c50e-4f76-829b-543df4ac95e9" containerName="nova-scheduler-scheduler" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.081671 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="573b60bf-1459-4e7f-9c71-f453e0d01074" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.229:8775/\": read tcp 10.217.0.2:35666->10.217.0.229:8775: read: connection reset by peer" Nov 22 07:55:32 
crc kubenswrapper[4929]: I1122 07:55:32.081683 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="573b60bf-1459-4e7f-9c71-f453e0d01074" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.229:8775/\": read tcp 10.217.0.2:35670->10.217.0.229:8775: read: connection reset by peer" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.659767 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.663938 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.670233 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.715737 4929 generic.go:334] "Generic (PLEG): container finished" podID="3e1faa08-de2a-4f06-a9a6-65245d240c19" containerID="53f66bc6bd9d41dc496eeb0f4707d32dd1234f8d504b7dc161063c856bd0bd6a" exitCode=0 Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.715821 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3e1faa08-de2a-4f06-a9a6-65245d240c19","Type":"ContainerDied","Data":"53f66bc6bd9d41dc496eeb0f4707d32dd1234f8d504b7dc161063c856bd0bd6a"} Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.715854 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3e1faa08-de2a-4f06-a9a6-65245d240c19","Type":"ContainerDied","Data":"5b3148656e6feebe7615b8734d4da2e569ac96ca7268af41ed00ad2e61f746ea"} Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.715874 4929 scope.go:117] "RemoveContainer" containerID="53f66bc6bd9d41dc496eeb0f4707d32dd1234f8d504b7dc161063c856bd0bd6a" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.716027 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.722330 4929 generic.go:334] "Generic (PLEG): container finished" podID="4a828f0a-c50e-4f76-829b-543df4ac95e9" containerID="9cd1234296209169dea2e499b5dcca70426984e72e9c51b866640f12ad404f5d" exitCode=0 Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.722423 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4a828f0a-c50e-4f76-829b-543df4ac95e9","Type":"ContainerDied","Data":"9cd1234296209169dea2e499b5dcca70426984e72e9c51b866640f12ad404f5d"} Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.722454 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4a828f0a-c50e-4f76-829b-543df4ac95e9","Type":"ContainerDied","Data":"59747d559209246afa62f890fd02a2dd69afb51d1c117ab8a8b980dba14025ba"} Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.722552 4929 util.go:48] "No ready sandbox for pod can be found. 
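The teardown above follows the usual grace-period contract: kubelet asks CRI-O to stop each container with gracePeriod=30, the runtime delivers SIGTERM, and only at the deadline would it escalate to SIGKILL. That is why nova-metadata-log finishes with exitCode=143 (the 128 + signal-number convention, SIGTERM being 15) while nova-api-api and nova-scheduler-scheduler, which shut down cleanly on SIGTERM, exit 0. A tiny illustration of the convention; exitMeaning is an invented helper.

package main

import "fmt"

// exitMeaning interprets the exit codes seen above: a process killed by a
// signal conventionally reports 128+signum, so SIGTERM (15) yields 143 and
// SIGKILL (9) would yield 137, while a handler that catches SIGTERM and
// shuts down in an orderly way can still exit 0.
func exitMeaning(code int) string {
	switch {
	case code == 0:
		return "clean exit (finished, or caught SIGTERM and shut down)"
	case code > 128:
		return fmt.Sprintf("killed by signal %d", code-128)
	default:
		return "application error"
	}
}

func main() {
	for _, c := range []int{0, 143, 137} {
		fmt.Printf("exitCode=%d: %s\n", c, exitMeaning(c))
	}
}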
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.726033 4929 generic.go:334] "Generic (PLEG): container finished" podID="573b60bf-1459-4e7f-9c71-f453e0d01074" containerID="194f960d8a64566c0a5ace7ef58baa60f2f8d9cb1d0ba51ee8c085477085ee43" exitCode=0 Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.726074 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"573b60bf-1459-4e7f-9c71-f453e0d01074","Type":"ContainerDied","Data":"194f960d8a64566c0a5ace7ef58baa60f2f8d9cb1d0ba51ee8c085477085ee43"} Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.726106 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"573b60bf-1459-4e7f-9c71-f453e0d01074","Type":"ContainerDied","Data":"10f865caffe753521ff9106580f6cfe084b5c4d3527be6cefcca6a44e913ac8c"} Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.726167 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.745300 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3e1faa08-de2a-4f06-a9a6-65245d240c19-logs\") pod \"3e1faa08-de2a-4f06-a9a6-65245d240c19\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.745378 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-config-data\") pod \"3e1faa08-de2a-4f06-a9a6-65245d240c19\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.745458 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/573b60bf-1459-4e7f-9c71-f453e0d01074-combined-ca-bundle\") pod \"573b60bf-1459-4e7f-9c71-f453e0d01074\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.745515 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-combined-ca-bundle\") pod \"3e1faa08-de2a-4f06-a9a6-65245d240c19\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.745541 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/573b60bf-1459-4e7f-9c71-f453e0d01074-config-data\") pod \"573b60bf-1459-4e7f-9c71-f453e0d01074\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.745570 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qhlpt\" (UniqueName: \"kubernetes.io/projected/573b60bf-1459-4e7f-9c71-f453e0d01074-kube-api-access-qhlpt\") pod \"573b60bf-1459-4e7f-9c71-f453e0d01074\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.745595 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kmg8l\" (UniqueName: \"kubernetes.io/projected/3e1faa08-de2a-4f06-a9a6-65245d240c19-kube-api-access-kmg8l\") pod \"3e1faa08-de2a-4f06-a9a6-65245d240c19\" (UID: 
\"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.745640 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/573b60bf-1459-4e7f-9c71-f453e0d01074-logs\") pod \"573b60bf-1459-4e7f-9c71-f453e0d01074\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.745663 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-internal-tls-certs\") pod \"3e1faa08-de2a-4f06-a9a6-65245d240c19\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.745685 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-public-tls-certs\") pod \"3e1faa08-de2a-4f06-a9a6-65245d240c19\" (UID: \"3e1faa08-de2a-4f06-a9a6-65245d240c19\") " Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.745723 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/573b60bf-1459-4e7f-9c71-f453e0d01074-nova-metadata-tls-certs\") pod \"573b60bf-1459-4e7f-9c71-f453e0d01074\" (UID: \"573b60bf-1459-4e7f-9c71-f453e0d01074\") " Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.746447 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/573b60bf-1459-4e7f-9c71-f453e0d01074-logs" (OuterVolumeSpecName: "logs") pod "573b60bf-1459-4e7f-9c71-f453e0d01074" (UID: "573b60bf-1459-4e7f-9c71-f453e0d01074"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.746775 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3e1faa08-de2a-4f06-a9a6-65245d240c19-logs" (OuterVolumeSpecName: "logs") pod "3e1faa08-de2a-4f06-a9a6-65245d240c19" (UID: "3e1faa08-de2a-4f06-a9a6-65245d240c19"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.751114 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/573b60bf-1459-4e7f-9c71-f453e0d01074-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.752808 4929 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3e1faa08-de2a-4f06-a9a6-65245d240c19-logs\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.757530 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/573b60bf-1459-4e7f-9c71-f453e0d01074-kube-api-access-qhlpt" (OuterVolumeSpecName: "kube-api-access-qhlpt") pod "573b60bf-1459-4e7f-9c71-f453e0d01074" (UID: "573b60bf-1459-4e7f-9c71-f453e0d01074"). InnerVolumeSpecName "kube-api-access-qhlpt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.759067 4929 scope.go:117] "RemoveContainer" containerID="98b105b96ea235fdbd4187e5f2c6dc967d9791bc17e200e03c5188e56e832202" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.763669 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e1faa08-de2a-4f06-a9a6-65245d240c19-kube-api-access-kmg8l" (OuterVolumeSpecName: "kube-api-access-kmg8l") pod "3e1faa08-de2a-4f06-a9a6-65245d240c19" (UID: "3e1faa08-de2a-4f06-a9a6-65245d240c19"). InnerVolumeSpecName "kube-api-access-kmg8l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.783427 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3e1faa08-de2a-4f06-a9a6-65245d240c19" (UID: "3e1faa08-de2a-4f06-a9a6-65245d240c19"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.793287 4929 scope.go:117] "RemoveContainer" containerID="53f66bc6bd9d41dc496eeb0f4707d32dd1234f8d504b7dc161063c856bd0bd6a" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.793281 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-config-data" (OuterVolumeSpecName: "config-data") pod "3e1faa08-de2a-4f06-a9a6-65245d240c19" (UID: "3e1faa08-de2a-4f06-a9a6-65245d240c19"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:55:32 crc kubenswrapper[4929]: E1122 07:55:32.793731 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"53f66bc6bd9d41dc496eeb0f4707d32dd1234f8d504b7dc161063c856bd0bd6a\": container with ID starting with 53f66bc6bd9d41dc496eeb0f4707d32dd1234f8d504b7dc161063c856bd0bd6a not found: ID does not exist" containerID="53f66bc6bd9d41dc496eeb0f4707d32dd1234f8d504b7dc161063c856bd0bd6a" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.793771 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"53f66bc6bd9d41dc496eeb0f4707d32dd1234f8d504b7dc161063c856bd0bd6a"} err="failed to get container status \"53f66bc6bd9d41dc496eeb0f4707d32dd1234f8d504b7dc161063c856bd0bd6a\": rpc error: code = NotFound desc = could not find container \"53f66bc6bd9d41dc496eeb0f4707d32dd1234f8d504b7dc161063c856bd0bd6a\": container with ID starting with 53f66bc6bd9d41dc496eeb0f4707d32dd1234f8d504b7dc161063c856bd0bd6a not found: ID does not exist" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.793800 4929 scope.go:117] "RemoveContainer" containerID="98b105b96ea235fdbd4187e5f2c6dc967d9791bc17e200e03c5188e56e832202" Nov 22 07:55:32 crc kubenswrapper[4929]: E1122 07:55:32.794093 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98b105b96ea235fdbd4187e5f2c6dc967d9791bc17e200e03c5188e56e832202\": container with ID starting with 98b105b96ea235fdbd4187e5f2c6dc967d9791bc17e200e03c5188e56e832202 not found: ID does not exist" containerID="98b105b96ea235fdbd4187e5f2c6dc967d9791bc17e200e03c5188e56e832202" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.794114 4929 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98b105b96ea235fdbd4187e5f2c6dc967d9791bc17e200e03c5188e56e832202"} err="failed to get container status \"98b105b96ea235fdbd4187e5f2c6dc967d9791bc17e200e03c5188e56e832202\": rpc error: code = NotFound desc = could not find container \"98b105b96ea235fdbd4187e5f2c6dc967d9791bc17e200e03c5188e56e832202\": container with ID starting with 98b105b96ea235fdbd4187e5f2c6dc967d9791bc17e200e03c5188e56e832202 not found: ID does not exist" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.794126 4929 scope.go:117] "RemoveContainer" containerID="9cd1234296209169dea2e499b5dcca70426984e72e9c51b866640f12ad404f5d" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.796477 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/573b60bf-1459-4e7f-9c71-f453e0d01074-config-data" (OuterVolumeSpecName: "config-data") pod "573b60bf-1459-4e7f-9c71-f453e0d01074" (UID: "573b60bf-1459-4e7f-9c71-f453e0d01074"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.803557 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/573b60bf-1459-4e7f-9c71-f453e0d01074-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "573b60bf-1459-4e7f-9c71-f453e0d01074" (UID: "573b60bf-1459-4e7f-9c71-f453e0d01074"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.837452 4929 scope.go:117] "RemoveContainer" containerID="9cd1234296209169dea2e499b5dcca70426984e72e9c51b866640f12ad404f5d" Nov 22 07:55:32 crc kubenswrapper[4929]: E1122 07:55:32.838699 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9cd1234296209169dea2e499b5dcca70426984e72e9c51b866640f12ad404f5d\": container with ID starting with 9cd1234296209169dea2e499b5dcca70426984e72e9c51b866640f12ad404f5d not found: ID does not exist" containerID="9cd1234296209169dea2e499b5dcca70426984e72e9c51b866640f12ad404f5d" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.838744 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9cd1234296209169dea2e499b5dcca70426984e72e9c51b866640f12ad404f5d"} err="failed to get container status \"9cd1234296209169dea2e499b5dcca70426984e72e9c51b866640f12ad404f5d\": rpc error: code = NotFound desc = could not find container \"9cd1234296209169dea2e499b5dcca70426984e72e9c51b866640f12ad404f5d\": container with ID starting with 9cd1234296209169dea2e499b5dcca70426984e72e9c51b866640f12ad404f5d not found: ID does not exist" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.838785 4929 scope.go:117] "RemoveContainer" containerID="194f960d8a64566c0a5ace7ef58baa60f2f8d9cb1d0ba51ee8c085477085ee43" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.844767 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "3e1faa08-de2a-4f06-a9a6-65245d240c19" (UID: "3e1faa08-de2a-4f06-a9a6-65245d240c19"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.854227 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a828f0a-c50e-4f76-829b-543df4ac95e9-combined-ca-bundle\") pod \"4a828f0a-c50e-4f76-829b-543df4ac95e9\" (UID: \"4a828f0a-c50e-4f76-829b-543df4ac95e9\") " Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.854392 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a828f0a-c50e-4f76-829b-543df4ac95e9-config-data\") pod \"4a828f0a-c50e-4f76-829b-543df4ac95e9\" (UID: \"4a828f0a-c50e-4f76-829b-543df4ac95e9\") " Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.854542 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wc7ds\" (UniqueName: \"kubernetes.io/projected/4a828f0a-c50e-4f76-829b-543df4ac95e9-kube-api-access-wc7ds\") pod \"4a828f0a-c50e-4f76-829b-543df4ac95e9\" (UID: \"4a828f0a-c50e-4f76-829b-543df4ac95e9\") " Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.854986 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.855003 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/573b60bf-1459-4e7f-9c71-f453e0d01074-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.855015 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.855025 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/573b60bf-1459-4e7f-9c71-f453e0d01074-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.855033 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qhlpt\" (UniqueName: \"kubernetes.io/projected/573b60bf-1459-4e7f-9c71-f453e0d01074-kube-api-access-qhlpt\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.855042 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kmg8l\" (UniqueName: \"kubernetes.io/projected/3e1faa08-de2a-4f06-a9a6-65245d240c19-kube-api-access-kmg8l\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.855050 4929 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.858726 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a828f0a-c50e-4f76-829b-543df4ac95e9-kube-api-access-wc7ds" (OuterVolumeSpecName: "kube-api-access-wc7ds") pod "4a828f0a-c50e-4f76-829b-543df4ac95e9" (UID: "4a828f0a-c50e-4f76-829b-543df4ac95e9"). InnerVolumeSpecName "kube-api-access-wc7ds". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.875382 4929 scope.go:117] "RemoveContainer" containerID="9cc1a9f10ad5a4e6b7ace51ecf6ed2bd633bf22967c135c41dc4aaa5de6301fd" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.889696 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/573b60bf-1459-4e7f-9c71-f453e0d01074-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "573b60bf-1459-4e7f-9c71-f453e0d01074" (UID: "573b60bf-1459-4e7f-9c71-f453e0d01074"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.892693 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a828f0a-c50e-4f76-829b-543df4ac95e9-config-data" (OuterVolumeSpecName: "config-data") pod "4a828f0a-c50e-4f76-829b-543df4ac95e9" (UID: "4a828f0a-c50e-4f76-829b-543df4ac95e9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.896756 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3e1faa08-de2a-4f06-a9a6-65245d240c19" (UID: "3e1faa08-de2a-4f06-a9a6-65245d240c19"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.901142 4929 scope.go:117] "RemoveContainer" containerID="194f960d8a64566c0a5ace7ef58baa60f2f8d9cb1d0ba51ee8c085477085ee43" Nov 22 07:55:32 crc kubenswrapper[4929]: E1122 07:55:32.901986 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"194f960d8a64566c0a5ace7ef58baa60f2f8d9cb1d0ba51ee8c085477085ee43\": container with ID starting with 194f960d8a64566c0a5ace7ef58baa60f2f8d9cb1d0ba51ee8c085477085ee43 not found: ID does not exist" containerID="194f960d8a64566c0a5ace7ef58baa60f2f8d9cb1d0ba51ee8c085477085ee43" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.902018 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"194f960d8a64566c0a5ace7ef58baa60f2f8d9cb1d0ba51ee8c085477085ee43"} err="failed to get container status \"194f960d8a64566c0a5ace7ef58baa60f2f8d9cb1d0ba51ee8c085477085ee43\": rpc error: code = NotFound desc = could not find container \"194f960d8a64566c0a5ace7ef58baa60f2f8d9cb1d0ba51ee8c085477085ee43\": container with ID starting with 194f960d8a64566c0a5ace7ef58baa60f2f8d9cb1d0ba51ee8c085477085ee43 not found: ID does not exist" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.902039 4929 scope.go:117] "RemoveContainer" containerID="9cc1a9f10ad5a4e6b7ace51ecf6ed2bd633bf22967c135c41dc4aaa5de6301fd" Nov 22 07:55:32 crc kubenswrapper[4929]: E1122 07:55:32.902265 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9cc1a9f10ad5a4e6b7ace51ecf6ed2bd633bf22967c135c41dc4aaa5de6301fd\": container with ID starting with 9cc1a9f10ad5a4e6b7ace51ecf6ed2bd633bf22967c135c41dc4aaa5de6301fd not found: ID does not exist" containerID="9cc1a9f10ad5a4e6b7ace51ecf6ed2bd633bf22967c135c41dc4aaa5de6301fd" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.902360 4929 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9cc1a9f10ad5a4e6b7ace51ecf6ed2bd633bf22967c135c41dc4aaa5de6301fd"} err="failed to get container status \"9cc1a9f10ad5a4e6b7ace51ecf6ed2bd633bf22967c135c41dc4aaa5de6301fd\": rpc error: code = NotFound desc = could not find container \"9cc1a9f10ad5a4e6b7ace51ecf6ed2bd633bf22967c135c41dc4aaa5de6301fd\": container with ID starting with 9cc1a9f10ad5a4e6b7ace51ecf6ed2bd633bf22967c135c41dc4aaa5de6301fd not found: ID does not exist" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.905901 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4a828f0a-c50e-4f76-829b-543df4ac95e9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4a828f0a-c50e-4f76-829b-543df4ac95e9" (UID: "4a828f0a-c50e-4f76-829b-543df4ac95e9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.956337 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wc7ds\" (UniqueName: \"kubernetes.io/projected/4a828f0a-c50e-4f76-829b-543df4ac95e9-kube-api-access-wc7ds\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.956371 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4a828f0a-c50e-4f76-829b-543df4ac95e9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.956382 4929 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3e1faa08-de2a-4f06-a9a6-65245d240c19-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.956392 4929 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/573b60bf-1459-4e7f-9c71-f453e0d01074-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:32 crc kubenswrapper[4929]: I1122 07:55:32.956401 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4a828f0a-c50e-4f76-829b-543df4ac95e9-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.063165 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.080531 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.104413 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.114943 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.126702 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 22 07:55:33 crc kubenswrapper[4929]: E1122 07:55:33.127183 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="573b60bf-1459-4e7f-9c71-f453e0d01074" containerName="nova-metadata-log" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.127201 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="573b60bf-1459-4e7f-9c71-f453e0d01074" containerName="nova-metadata-log" Nov 22 07:55:33 crc kubenswrapper[4929]: E1122 07:55:33.127234 4929 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="4a828f0a-c50e-4f76-829b-543df4ac95e9" containerName="nova-scheduler-scheduler" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.127241 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a828f0a-c50e-4f76-829b-543df4ac95e9" containerName="nova-scheduler-scheduler" Nov 22 07:55:33 crc kubenswrapper[4929]: E1122 07:55:33.127259 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="861208ae-0e23-4cab-9ac4-b08cade2fead" containerName="nova-manage" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.127266 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="861208ae-0e23-4cab-9ac4-b08cade2fead" containerName="nova-manage" Nov 22 07:55:33 crc kubenswrapper[4929]: E1122 07:55:33.127281 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e1faa08-de2a-4f06-a9a6-65245d240c19" containerName="nova-api-api" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.127287 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e1faa08-de2a-4f06-a9a6-65245d240c19" containerName="nova-api-api" Nov 22 07:55:33 crc kubenswrapper[4929]: E1122 07:55:33.127299 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e1faa08-de2a-4f06-a9a6-65245d240c19" containerName="nova-api-log" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.127307 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e1faa08-de2a-4f06-a9a6-65245d240c19" containerName="nova-api-log" Nov 22 07:55:33 crc kubenswrapper[4929]: E1122 07:55:33.127321 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="573b60bf-1459-4e7f-9c71-f453e0d01074" containerName="nova-metadata-metadata" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.127327 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="573b60bf-1459-4e7f-9c71-f453e0d01074" containerName="nova-metadata-metadata" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.127551 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="861208ae-0e23-4cab-9ac4-b08cade2fead" containerName="nova-manage" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.127563 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e1faa08-de2a-4f06-a9a6-65245d240c19" containerName="nova-api-api" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.127576 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e1faa08-de2a-4f06-a9a6-65245d240c19" containerName="nova-api-log" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.127589 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a828f0a-c50e-4f76-829b-543df4ac95e9" containerName="nova-scheduler-scheduler" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.127601 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="573b60bf-1459-4e7f-9c71-f453e0d01074" containerName="nova-metadata-metadata" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.127610 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="573b60bf-1459-4e7f-9c71-f453e0d01074" containerName="nova-metadata-log" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.137397 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.164558 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.171678 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.171991 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.176255 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.196411 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.209285 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.211511 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.215767 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.215968 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.220257 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.221817 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.223645 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.231607 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.241882 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.250828 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.268560 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9h9wz\" (UniqueName: \"kubernetes.io/projected/e17c146a-02fe-4dc5-8199-c8c3f4ac12a7-kube-api-access-9h9wz\") pod \"nova-api-0\" (UID: \"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7\") " pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.268790 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e17c146a-02fe-4dc5-8199-c8c3f4ac12a7-config-data\") pod \"nova-api-0\" (UID: \"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7\") " pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.268969 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e17c146a-02fe-4dc5-8199-c8c3f4ac12a7-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7\") " pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.269079 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e17c146a-02fe-4dc5-8199-c8c3f4ac12a7-logs\") pod \"nova-api-0\" (UID: \"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7\") " pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.269135 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e17c146a-02fe-4dc5-8199-c8c3f4ac12a7-public-tls-certs\") pod \"nova-api-0\" (UID: \"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7\") " pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.269167 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e17c146a-02fe-4dc5-8199-c8c3f4ac12a7-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7\") " pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.370939 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e17c146a-02fe-4dc5-8199-c8c3f4ac12a7-config-data\") pod \"nova-api-0\" (UID: \"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7\") " pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.371009 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mr7zq\" (UniqueName: \"kubernetes.io/projected/82e5e3cf-4e6b-4110-bb64-909de233a84a-kube-api-access-mr7zq\") pod 
\"nova-metadata-0\" (UID: \"82e5e3cf-4e6b-4110-bb64-909de233a84a\") " pod="openstack/nova-metadata-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.371058 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82e5e3cf-4e6b-4110-bb64-909de233a84a-config-data\") pod \"nova-metadata-0\" (UID: \"82e5e3cf-4e6b-4110-bb64-909de233a84a\") " pod="openstack/nova-metadata-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.371076 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82e5e3cf-4e6b-4110-bb64-909de233a84a-logs\") pod \"nova-metadata-0\" (UID: \"82e5e3cf-4e6b-4110-bb64-909de233a84a\") " pod="openstack/nova-metadata-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.371093 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/82e5e3cf-4e6b-4110-bb64-909de233a84a-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"82e5e3cf-4e6b-4110-bb64-909de233a84a\") " pod="openstack/nova-metadata-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.371123 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e17c146a-02fe-4dc5-8199-c8c3f4ac12a7-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7\") " pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.371152 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7baf0b74-ace4-4305-b9a4-f9943b7fc888-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7baf0b74-ace4-4305-b9a4-f9943b7fc888\") " pod="openstack/nova-scheduler-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.371175 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7baf0b74-ace4-4305-b9a4-f9943b7fc888-config-data\") pod \"nova-scheduler-0\" (UID: \"7baf0b74-ace4-4305-b9a4-f9943b7fc888\") " pod="openstack/nova-scheduler-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.371198 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-np8cw\" (UniqueName: \"kubernetes.io/projected/7baf0b74-ace4-4305-b9a4-f9943b7fc888-kube-api-access-np8cw\") pod \"nova-scheduler-0\" (UID: \"7baf0b74-ace4-4305-b9a4-f9943b7fc888\") " pod="openstack/nova-scheduler-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.371242 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e17c146a-02fe-4dc5-8199-c8c3f4ac12a7-logs\") pod \"nova-api-0\" (UID: \"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7\") " pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.371283 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e17c146a-02fe-4dc5-8199-c8c3f4ac12a7-public-tls-certs\") pod \"nova-api-0\" (UID: \"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7\") " pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.371340 4929 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e17c146a-02fe-4dc5-8199-c8c3f4ac12a7-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7\") " pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.371510 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9h9wz\" (UniqueName: \"kubernetes.io/projected/e17c146a-02fe-4dc5-8199-c8c3f4ac12a7-kube-api-access-9h9wz\") pod \"nova-api-0\" (UID: \"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7\") " pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.371605 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82e5e3cf-4e6b-4110-bb64-909de233a84a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"82e5e3cf-4e6b-4110-bb64-909de233a84a\") " pod="openstack/nova-metadata-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.372343 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e17c146a-02fe-4dc5-8199-c8c3f4ac12a7-logs\") pod \"nova-api-0\" (UID: \"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7\") " pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.375096 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e17c146a-02fe-4dc5-8199-c8c3f4ac12a7-config-data\") pod \"nova-api-0\" (UID: \"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7\") " pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.375123 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e17c146a-02fe-4dc5-8199-c8c3f4ac12a7-public-tls-certs\") pod \"nova-api-0\" (UID: \"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7\") " pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.377060 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e17c146a-02fe-4dc5-8199-c8c3f4ac12a7-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7\") " pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.377921 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e17c146a-02fe-4dc5-8199-c8c3f4ac12a7-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7\") " pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.390120 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9h9wz\" (UniqueName: \"kubernetes.io/projected/e17c146a-02fe-4dc5-8199-c8c3f4ac12a7-kube-api-access-9h9wz\") pod \"nova-api-0\" (UID: \"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7\") " pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.472957 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mr7zq\" (UniqueName: \"kubernetes.io/projected/82e5e3cf-4e6b-4110-bb64-909de233a84a-kube-api-access-mr7zq\") pod \"nova-metadata-0\" (UID: \"82e5e3cf-4e6b-4110-bb64-909de233a84a\") " pod="openstack/nova-metadata-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.473025 4929 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82e5e3cf-4e6b-4110-bb64-909de233a84a-config-data\") pod \"nova-metadata-0\" (UID: \"82e5e3cf-4e6b-4110-bb64-909de233a84a\") " pod="openstack/nova-metadata-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.473044 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82e5e3cf-4e6b-4110-bb64-909de233a84a-logs\") pod \"nova-metadata-0\" (UID: \"82e5e3cf-4e6b-4110-bb64-909de233a84a\") " pod="openstack/nova-metadata-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.473062 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/82e5e3cf-4e6b-4110-bb64-909de233a84a-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"82e5e3cf-4e6b-4110-bb64-909de233a84a\") " pod="openstack/nova-metadata-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.473100 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7baf0b74-ace4-4305-b9a4-f9943b7fc888-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7baf0b74-ace4-4305-b9a4-f9943b7fc888\") " pod="openstack/nova-scheduler-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.473126 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7baf0b74-ace4-4305-b9a4-f9943b7fc888-config-data\") pod \"nova-scheduler-0\" (UID: \"7baf0b74-ace4-4305-b9a4-f9943b7fc888\") " pod="openstack/nova-scheduler-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.473147 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-np8cw\" (UniqueName: \"kubernetes.io/projected/7baf0b74-ace4-4305-b9a4-f9943b7fc888-kube-api-access-np8cw\") pod \"nova-scheduler-0\" (UID: \"7baf0b74-ace4-4305-b9a4-f9943b7fc888\") " pod="openstack/nova-scheduler-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.473226 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82e5e3cf-4e6b-4110-bb64-909de233a84a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"82e5e3cf-4e6b-4110-bb64-909de233a84a\") " pod="openstack/nova-metadata-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.473874 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/82e5e3cf-4e6b-4110-bb64-909de233a84a-logs\") pod \"nova-metadata-0\" (UID: \"82e5e3cf-4e6b-4110-bb64-909de233a84a\") " pod="openstack/nova-metadata-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.476824 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/82e5e3cf-4e6b-4110-bb64-909de233a84a-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"82e5e3cf-4e6b-4110-bb64-909de233a84a\") " pod="openstack/nova-metadata-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.476821 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82e5e3cf-4e6b-4110-bb64-909de233a84a-config-data\") pod \"nova-metadata-0\" (UID: \"82e5e3cf-4e6b-4110-bb64-909de233a84a\") " pod="openstack/nova-metadata-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 
07:55:33.477159 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82e5e3cf-4e6b-4110-bb64-909de233a84a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"82e5e3cf-4e6b-4110-bb64-909de233a84a\") " pod="openstack/nova-metadata-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.477375 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7baf0b74-ace4-4305-b9a4-f9943b7fc888-config-data\") pod \"nova-scheduler-0\" (UID: \"7baf0b74-ace4-4305-b9a4-f9943b7fc888\") " pod="openstack/nova-scheduler-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.477460 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7baf0b74-ace4-4305-b9a4-f9943b7fc888-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7baf0b74-ace4-4305-b9a4-f9943b7fc888\") " pod="openstack/nova-scheduler-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.489782 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-np8cw\" (UniqueName: \"kubernetes.io/projected/7baf0b74-ace4-4305-b9a4-f9943b7fc888-kube-api-access-np8cw\") pod \"nova-scheduler-0\" (UID: \"7baf0b74-ace4-4305-b9a4-f9943b7fc888\") " pod="openstack/nova-scheduler-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.490253 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mr7zq\" (UniqueName: \"kubernetes.io/projected/82e5e3cf-4e6b-4110-bb64-909de233a84a-kube-api-access-mr7zq\") pod \"nova-metadata-0\" (UID: \"82e5e3cf-4e6b-4110-bb64-909de233a84a\") " pod="openstack/nova-metadata-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.541413 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.555663 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.560915 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.959769 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e1faa08-de2a-4f06-a9a6-65245d240c19" path="/var/lib/kubelet/pods/3e1faa08-de2a-4f06-a9a6-65245d240c19/volumes" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.968781 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a828f0a-c50e-4f76-829b-543df4ac95e9" path="/var/lib/kubelet/pods/4a828f0a-c50e-4f76-829b-543df4ac95e9/volumes" Nov 22 07:55:33 crc kubenswrapper[4929]: I1122 07:55:33.969917 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="573b60bf-1459-4e7f-9c71-f453e0d01074" path="/var/lib/kubelet/pods/573b60bf-1459-4e7f-9c71-f453e0d01074/volumes" Nov 22 07:55:34 crc kubenswrapper[4929]: I1122 07:55:34.040591 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 07:55:34 crc kubenswrapper[4929]: I1122 07:55:34.146018 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 07:55:34 crc kubenswrapper[4929]: I1122 07:55:34.160462 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 07:55:34 crc kubenswrapper[4929]: W1122 07:55:34.161055 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7baf0b74_ace4_4305_b9a4_f9943b7fc888.slice/crio-f0a05a3b43fae74eea4dc4bcf3792b5b96d9ba2cdb332e931eba02f94ee4b5bf WatchSource:0}: Error finding container f0a05a3b43fae74eea4dc4bcf3792b5b96d9ba2cdb332e931eba02f94ee4b5bf: Status 404 returned error can't find the container with id f0a05a3b43fae74eea4dc4bcf3792b5b96d9ba2cdb332e931eba02f94ee4b5bf Nov 22 07:55:34 crc kubenswrapper[4929]: W1122 07:55:34.163893 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod82e5e3cf_4e6b_4110_bb64_909de233a84a.slice/crio-95efecc9c22fd49125ec42d8f657b48ceaef0c717de6ae315eb0d48ca8d19a4e WatchSource:0}: Error finding container 95efecc9c22fd49125ec42d8f657b48ceaef0c717de6ae315eb0d48ca8d19a4e: Status 404 returned error can't find the container with id 95efecc9c22fd49125ec42d8f657b48ceaef0c717de6ae315eb0d48ca8d19a4e Nov 22 07:55:34 crc kubenswrapper[4929]: I1122 07:55:34.766790 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7","Type":"ContainerStarted","Data":"44c23edd62181402d0c2ae917cd01f9f3f810e78657626d1eccef81ca63932e9"} Nov 22 07:55:34 crc kubenswrapper[4929]: I1122 07:55:34.768530 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"82e5e3cf-4e6b-4110-bb64-909de233a84a","Type":"ContainerStarted","Data":"95efecc9c22fd49125ec42d8f657b48ceaef0c717de6ae315eb0d48ca8d19a4e"} Nov 22 07:55:34 crc kubenswrapper[4929]: I1122 07:55:34.769874 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7baf0b74-ace4-4305-b9a4-f9943b7fc888","Type":"ContainerStarted","Data":"f0a05a3b43fae74eea4dc4bcf3792b5b96d9ba2cdb332e931eba02f94ee4b5bf"} Nov 22 07:55:35 crc kubenswrapper[4929]: I1122 07:55:35.781630 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7","Type":"ContainerStarted","Data":"52659c395f5d3f07d165b66bc1822500d94299056667d45269f15412fc0b09c7"} Nov 22 07:55:35 
crc kubenswrapper[4929]: I1122 07:55:35.781744 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e17c146a-02fe-4dc5-8199-c8c3f4ac12a7","Type":"ContainerStarted","Data":"90813d6df7287a83b859b5c3b2534699a9fa3382274859b42803b766edbe3f5d"} Nov 22 07:55:35 crc kubenswrapper[4929]: I1122 07:55:35.783302 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"82e5e3cf-4e6b-4110-bb64-909de233a84a","Type":"ContainerStarted","Data":"f4ddc5370e51e9c03f35c4c83646497e35586924019dcda5f73d456b85d52e41"} Nov 22 07:55:35 crc kubenswrapper[4929]: I1122 07:55:35.783329 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"82e5e3cf-4e6b-4110-bb64-909de233a84a","Type":"ContainerStarted","Data":"257730b27917228691efb4fe69497c108ffa95cee9eefc3f42c67e6523cfd573"} Nov 22 07:55:35 crc kubenswrapper[4929]: I1122 07:55:35.785544 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7baf0b74-ace4-4305-b9a4-f9943b7fc888","Type":"ContainerStarted","Data":"e1347239416e6ba55f4bd391c2def3ac282f19cc990e289d0d81ff455dd30739"} Nov 22 07:55:35 crc kubenswrapper[4929]: I1122 07:55:35.818435 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.81841508 podStartE2EDuration="2.81841508s" podCreationTimestamp="2025-11-22 07:55:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:55:35.808547684 +0000 UTC m=+2672.918001697" watchObservedRunningTime="2025-11-22 07:55:35.81841508 +0000 UTC m=+2672.927869093" Nov 22 07:55:35 crc kubenswrapper[4929]: I1122 07:55:35.843329 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.843308719 podStartE2EDuration="2.843308719s" podCreationTimestamp="2025-11-22 07:55:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:55:35.82969782 +0000 UTC m=+2672.939151853" watchObservedRunningTime="2025-11-22 07:55:35.843308719 +0000 UTC m=+2672.952762732" Nov 22 07:55:35 crc kubenswrapper[4929]: I1122 07:55:35.895554 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.895509258 podStartE2EDuration="2.895509258s" podCreationTimestamp="2025-11-22 07:55:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 07:55:35.865637195 +0000 UTC m=+2672.975091238" watchObservedRunningTime="2025-11-22 07:55:35.895509258 +0000 UTC m=+2673.004963281" Nov 22 07:55:38 crc kubenswrapper[4929]: I1122 07:55:38.556124 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 22 07:55:38 crc kubenswrapper[4929]: I1122 07:55:38.556475 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 22 07:55:38 crc kubenswrapper[4929]: I1122 07:55:38.562402 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 22 07:55:39 crc kubenswrapper[4929]: I1122 07:55:39.949859 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c" Nov 22 07:55:39 crc 
kubenswrapper[4929]: E1122 07:55:39.952270 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 07:55:43 crc kubenswrapper[4929]: I1122 07:55:43.541869 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 22 07:55:43 crc kubenswrapper[4929]: I1122 07:55:43.542283 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 22 07:55:43 crc kubenswrapper[4929]: I1122 07:55:43.556377 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 22 07:55:43 crc kubenswrapper[4929]: I1122 07:55:43.556597 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 22 07:55:43 crc kubenswrapper[4929]: I1122 07:55:43.562249 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 22 07:55:43 crc kubenswrapper[4929]: I1122 07:55:43.600959 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 22 07:55:43 crc kubenswrapper[4929]: I1122 07:55:43.917120 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 22 07:55:44 crc kubenswrapper[4929]: I1122 07:55:44.555441 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e17c146a-02fe-4dc5-8199-c8c3f4ac12a7" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.237:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 22 07:55:44 crc kubenswrapper[4929]: I1122 07:55:44.555480 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e17c146a-02fe-4dc5-8199-c8c3f4ac12a7" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.237:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 22 07:55:44 crc kubenswrapper[4929]: I1122 07:55:44.565518 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="82e5e3cf-4e6b-4110-bb64-909de233a84a" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.238:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 22 07:55:44 crc kubenswrapper[4929]: I1122 07:55:44.565577 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="82e5e3cf-4e6b-4110-bb64-909de233a84a" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.238:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 22 07:55:50 crc kubenswrapper[4929]: I1122 07:55:50.947608 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c"
Nov 22 07:55:50 crc kubenswrapper[4929]: E1122 07:55:50.948340 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 07:55:53 crc kubenswrapper[4929]: I1122 07:55:53.556404 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 22 07:55:53 crc kubenswrapper[4929]: I1122 07:55:53.556859 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 22 07:55:53 crc kubenswrapper[4929]: I1122 07:55:53.557445 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 22 07:55:53 crc kubenswrapper[4929]: I1122 07:55:53.557499 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 22 07:55:53 crc kubenswrapper[4929]: I1122 07:55:53.579810 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 22 07:55:53 crc kubenswrapper[4929]: I1122 07:55:53.580117 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 22 07:55:53 crc kubenswrapper[4929]: I1122 07:55:53.580309 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 22 07:55:53 crc kubenswrapper[4929]: I1122 07:55:53.580761 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 22 07:55:53 crc kubenswrapper[4929]: I1122 07:55:53.600917 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 22 07:55:53 crc kubenswrapper[4929]: I1122 07:55:53.603513 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 22 07:56:05 crc kubenswrapper[4929]: I1122 07:56:05.947107 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c"
Nov 22 07:56:05 crc kubenswrapper[4929]: E1122 07:56:05.947938 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 07:56:11 crc kubenswrapper[4929]: I1122 07:56:11.672407 4929 scope.go:117] "RemoveContainer" containerID="561660201fd6b21e088e8e5583de7e6f59268d27fc8a1f1a0cfe988441824098"
Nov 22 07:56:11 crc kubenswrapper[4929]: I1122 07:56:11.697877 4929 scope.go:117] "RemoveContainer" containerID="05d89016841dbe3c5ab5031215bead7cec49f477e73ca138789611724d8f9edd"
Nov 22 07:56:11 crc kubenswrapper[4929]: I1122 07:56:11.759693 4929 scope.go:117] "RemoveContainer" containerID="ed8aef95546e74d357af6356c52f0dc6f9dab6eeced8d1b329f82e3c3da590e7"
Nov 22 07:56:17 crc kubenswrapper[4929]: I1122 07:56:17.947627 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c"
Nov 22 07:56:17 crc kubenswrapper[4929]: E1122 07:56:17.948408 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 07:56:21 crc kubenswrapper[4929]: I1122 07:56:21.046980 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-rsm44"]
Nov 22 07:56:21 crc kubenswrapper[4929]: I1122 07:56:21.056149 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-rsm44"]
Nov 22 07:56:21 crc kubenswrapper[4929]: I1122 07:56:21.960686 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="155c1bdd-4b26-4059-8ab7-7a6299bc17c9" path="/var/lib/kubelet/pods/155c1bdd-4b26-4059-8ab7-7a6299bc17c9/volumes"
Nov 22 07:56:31 crc kubenswrapper[4929]: I1122 07:56:31.947245 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c"
Nov 22 07:56:31 crc kubenswrapper[4929]: E1122 07:56:31.947984 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 07:56:46 crc kubenswrapper[4929]: I1122 07:56:46.947179 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c"
Nov 22 07:56:46 crc kubenswrapper[4929]: E1122 07:56:46.947894 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 07:56:49 crc kubenswrapper[4929]: I1122 07:56:49.048187 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-3a16-account-create-hz4wf"]
Nov 22 07:56:49 crc kubenswrapper[4929]: I1122 07:56:49.057605 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-tpt6v"]
Nov 22 07:56:49 crc kubenswrapper[4929]: I1122 07:56:49.064709 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-tpt6v"]
Nov 22 07:56:49 crc kubenswrapper[4929]: I1122 07:56:49.071864 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-3a16-account-create-hz4wf"]
Nov 22 07:56:49 crc kubenswrapper[4929]: I1122 07:56:49.958299 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a55beaa-874f-44cf-bdc3-1eb292fb8ae7" path="/var/lib/kubelet/pods/3a55beaa-874f-44cf-bdc3-1eb292fb8ae7/volumes"
Nov 22 07:56:49 crc kubenswrapper[4929]: I1122 07:56:49.959122 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a59addba-444c-4d86-bd65-d8796e750a94" path="/var/lib/kubelet/pods/a59addba-444c-4d86-bd65-d8796e750a94/volumes"
Nov 22 07:56:51 crc kubenswrapper[4929]: I1122 07:56:51.028767 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-59xgj"]
Nov 22 07:56:51 crc kubenswrapper[4929]: I1122 07:56:51.039763 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-vpgb4"]
Nov 22 07:56:51 crc kubenswrapper[4929]: I1122 07:56:51.049399 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-59xgj"]
Nov 22 07:56:51 crc kubenswrapper[4929]: I1122 07:56:51.057948 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-vpgb4"]
Nov 22 07:56:51 crc kubenswrapper[4929]: I1122 07:56:51.960980 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="701a52ea-cd55-4a02-9662-18d5899a3324" path="/var/lib/kubelet/pods/701a52ea-cd55-4a02-9662-18d5899a3324/volumes"
Nov 22 07:56:51 crc kubenswrapper[4929]: I1122 07:56:51.961990 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9dac5799-42d7-4917-ad44-68640f1526ff" path="/var/lib/kubelet/pods/9dac5799-42d7-4917-ad44-68640f1526ff/volumes"
Nov 22 07:56:52 crc kubenswrapper[4929]: I1122 07:56:52.033421 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-f286-account-create-rvfxv"]
Nov 22 07:56:52 crc kubenswrapper[4929]: I1122 07:56:52.044403 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-a212-account-create-b44cg"]
Nov 22 07:56:52 crc kubenswrapper[4929]: I1122 07:56:52.054709 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-a212-account-create-b44cg"]
Nov 22 07:56:52 crc kubenswrapper[4929]: I1122 07:56:52.063008 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-f286-account-create-rvfxv"]
Nov 22 07:56:53 crc kubenswrapper[4929]: I1122 07:56:53.963148 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8726cb86-7598-4300-aa4a-4c4dd8deaa3d" path="/var/lib/kubelet/pods/8726cb86-7598-4300-aa4a-4c4dd8deaa3d/volumes"
Nov 22 07:56:53 crc kubenswrapper[4929]: I1122 07:56:53.964164 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d84fa1ff-4863-4730-be5b-a2ae8516dc71" path="/var/lib/kubelet/pods/d84fa1ff-4863-4730-be5b-a2ae8516dc71/volumes"
Nov 22 07:56:58 crc kubenswrapper[4929]: I1122 07:56:58.947415 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c"
Nov 22 07:56:59 crc kubenswrapper[4929]: I1122 07:56:59.659329 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"1fd8dd54949c05dad9f35e5eb488d84888297acdf4612e470c54032ca755c997"}
Nov 22 07:57:11 crc kubenswrapper[4929]: I1122 07:57:11.868079 4929 scope.go:117] "RemoveContainer" containerID="d4c3449574449704a8cecef90edaeef7595bf637008703f6f64666d32277eaf6"
Nov 22 07:57:11 crc kubenswrapper[4929]: I1122 07:57:11.948605 4929 scope.go:117] "RemoveContainer" containerID="0ef66ad1b8e294a458a52c4e6b0e9e7131283f1e32e3943ac5e082f98e127322"
Nov 22 07:57:12 crc kubenswrapper[4929]: I1122 07:57:12.024231 4929 scope.go:117] "RemoveContainer" containerID="4c8d130ba2bfc229564add643c163fd3b2720216177366f184bd57174725d9b2"
Nov 22 07:57:12 crc kubenswrapper[4929]: I1122 07:57:12.072061 4929 scope.go:117] "RemoveContainer" containerID="6aedd34efb89a1c4786abb64e770987ec7c8ba89b55f49603792b39642d5a435"
Nov 22 07:57:12 crc kubenswrapper[4929]: I1122 07:57:12.102751 4929 scope.go:117] "RemoveContainer" containerID="5caad4214777e1cd83891d1edd9db843145698e5a94efda6149ce28bca8f1f97"
Nov 22 07:57:12 crc kubenswrapper[4929]: I1122 07:57:12.264869 4929 scope.go:117] "RemoveContainer" containerID="365572ecc13ab35539eff2b0acd25a79fbdcd1258856e14c44fcb313f11b7e8f"
Nov 22 07:57:12 crc kubenswrapper[4929]: I1122 07:57:12.283517 4929 scope.go:117] "RemoveContainer" containerID="c90db54ce8a09fb86806a636353f0d15e7a425187d70c10912178f7de56938b5"
Nov 22 07:57:43 crc kubenswrapper[4929]: I1122 07:57:43.471858 4929 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-5b65879b9f-vjcj8" podUID="3971fe14-e65d-4a66-a0ce-d004f1f4e0f1" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502"
Nov 22 07:58:36 crc kubenswrapper[4929]: I1122 07:58:36.037161 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-9cvcv"]
Nov 22 07:58:36 crc kubenswrapper[4929]: I1122 07:58:36.044998 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-9cvcv"]
Nov 22 07:58:37 crc kubenswrapper[4929]: I1122 07:58:37.959861 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e5572bd-b4bf-4476-9247-06d7c892dcf1" path="/var/lib/kubelet/pods/7e5572bd-b4bf-4476-9247-06d7c892dcf1/volumes"
Nov 22 07:58:40 crc kubenswrapper[4929]: I1122 07:58:40.031603 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-r6lzk"]
Nov 22 07:58:40 crc kubenswrapper[4929]: I1122 07:58:40.039954 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-r6lzk"]
Nov 22 07:58:41 crc kubenswrapper[4929]: I1122 07:58:41.958823 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c" path="/var/lib/kubelet/pods/4c98cd8c-8d42-4c8e-9ff9-84de2dafb18c/volumes"
Nov 22 07:58:42 crc kubenswrapper[4929]: I1122 07:58:42.030179 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-q7499"]
Nov 22 07:58:42 crc kubenswrapper[4929]: I1122 07:58:42.042968 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-q7499"]
Nov 22 07:58:43 crc kubenswrapper[4929]: I1122 07:58:43.963073 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a027f20-aeb5-4af3-9ccc-c4271d8717d1" path="/var/lib/kubelet/pods/8a027f20-aeb5-4af3-9ccc-c4271d8717d1/volumes"
Nov 22 07:59:12 crc kubenswrapper[4929]: I1122 07:59:12.557695 4929 scope.go:117] "RemoveContainer" containerID="5cc0a46aa7a491ff2d9f413a47650e35c1e27c38bbf7da737dba0db4e69791b5"
Nov 22 07:59:12 crc kubenswrapper[4929]: I1122 07:59:12.614923 4929 scope.go:117] "RemoveContainer" containerID="fdbc2fa7daba9dc24b636806a887c51ccd87efa8c07ad9d0eef10f7bb914a961"
Nov 22 07:59:12 crc kubenswrapper[4929]: I1122 07:59:12.661094 4929 scope.go:117] "RemoveContainer" containerID="5dde2d3f2168bf10e3c514fbd3d384a32beed85e1e86d848393685a49e346cca"
Nov 22 07:59:18 crc kubenswrapper[4929]: I1122 07:59:18.595310 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 07:59:18 crc kubenswrapper[4929]: I1122 07:59:18.595883 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 07:59:48 crc kubenswrapper[4929]: I1122 07:59:48.595296 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 07:59:48 crc kubenswrapper[4929]: I1122 07:59:48.595926 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 08:00:00 crc kubenswrapper[4929]: I1122 08:00:00.178150 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52"]
Nov 22 08:00:00 crc kubenswrapper[4929]: I1122 08:00:00.180426 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52"
Nov 22 08:00:00 crc kubenswrapper[4929]: I1122 08:00:00.182511 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 22 08:00:00 crc kubenswrapper[4929]: I1122 08:00:00.183170 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 22 08:00:00 crc kubenswrapper[4929]: I1122 08:00:00.202817 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52"]
Nov 22 08:00:00 crc kubenswrapper[4929]: I1122 08:00:00.283563 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec23b237-dd46-4f3a-94a7-a3b2cc2cff23-secret-volume\") pod \"collect-profiles-29396640-6tw52\" (UID: \"ec23b237-dd46-4f3a-94a7-a3b2cc2cff23\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52"
Nov 22 08:00:00 crc kubenswrapper[4929]: I1122 08:00:00.283836 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tnpq\" (UniqueName: \"kubernetes.io/projected/ec23b237-dd46-4f3a-94a7-a3b2cc2cff23-kube-api-access-2tnpq\") pod \"collect-profiles-29396640-6tw52\" (UID: \"ec23b237-dd46-4f3a-94a7-a3b2cc2cff23\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52"
Nov 22 08:00:00 crc kubenswrapper[4929]: I1122 08:00:00.284040 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec23b237-dd46-4f3a-94a7-a3b2cc2cff23-config-volume\") pod \"collect-profiles-29396640-6tw52\" (UID: \"ec23b237-dd46-4f3a-94a7-a3b2cc2cff23\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52"
Nov 22 08:00:00 crc kubenswrapper[4929]: I1122 08:00:00.385690 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec23b237-dd46-4f3a-94a7-a3b2cc2cff23-secret-volume\") pod \"collect-profiles-29396640-6tw52\" (UID: \"ec23b237-dd46-4f3a-94a7-a3b2cc2cff23\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52"
Nov 22 08:00:00 crc kubenswrapper[4929]: I1122 08:00:00.385844 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2tnpq\" (UniqueName: \"kubernetes.io/projected/ec23b237-dd46-4f3a-94a7-a3b2cc2cff23-kube-api-access-2tnpq\") pod \"collect-profiles-29396640-6tw52\" (UID: \"ec23b237-dd46-4f3a-94a7-a3b2cc2cff23\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52"
Nov 22 08:00:00 crc kubenswrapper[4929]: I1122 08:00:00.385956 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec23b237-dd46-4f3a-94a7-a3b2cc2cff23-config-volume\") pod \"collect-profiles-29396640-6tw52\" (UID: \"ec23b237-dd46-4f3a-94a7-a3b2cc2cff23\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52"
Nov 22 08:00:00 crc kubenswrapper[4929]: I1122 08:00:00.386845 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec23b237-dd46-4f3a-94a7-a3b2cc2cff23-config-volume\") pod \"collect-profiles-29396640-6tw52\" (UID: \"ec23b237-dd46-4f3a-94a7-a3b2cc2cff23\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52"
Nov 22 08:00:00 crc kubenswrapper[4929]: I1122 08:00:00.393245 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec23b237-dd46-4f3a-94a7-a3b2cc2cff23-secret-volume\") pod \"collect-profiles-29396640-6tw52\" (UID: \"ec23b237-dd46-4f3a-94a7-a3b2cc2cff23\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52"
Nov 22 08:00:00 crc kubenswrapper[4929]: I1122 08:00:00.405907 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2tnpq\" (UniqueName: \"kubernetes.io/projected/ec23b237-dd46-4f3a-94a7-a3b2cc2cff23-kube-api-access-2tnpq\") pod \"collect-profiles-29396640-6tw52\" (UID: \"ec23b237-dd46-4f3a-94a7-a3b2cc2cff23\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52"
Nov 22 08:00:00 crc kubenswrapper[4929]: I1122 08:00:00.510862 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52"
Nov 22 08:00:01 crc kubenswrapper[4929]: I1122 08:00:01.003567 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52"]
Nov 22 08:00:01 crc kubenswrapper[4929]: I1122 08:00:01.642194 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52" event={"ID":"ec23b237-dd46-4f3a-94a7-a3b2cc2cff23","Type":"ContainerStarted","Data":"96d8a946644824c210c0e06e131a48aca7d7799c0bd4e1722fcc63eb0a20821b"}
Nov 22 08:00:01 crc kubenswrapper[4929]: I1122 08:00:01.642342 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52" event={"ID":"ec23b237-dd46-4f3a-94a7-a3b2cc2cff23","Type":"ContainerStarted","Data":"69a0b2e13fff9967d7fd5686b7789aae8d1eba99a138112d72d455e2c0e79c32"}
Nov 22 08:00:02 crc kubenswrapper[4929]: I1122 08:00:02.657879 4929 generic.go:334] "Generic (PLEG): container finished" podID="ec23b237-dd46-4f3a-94a7-a3b2cc2cff23" containerID="96d8a946644824c210c0e06e131a48aca7d7799c0bd4e1722fcc63eb0a20821b" exitCode=0
Nov 22 08:00:02 crc kubenswrapper[4929]: I1122 08:00:02.657931 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52" event={"ID":"ec23b237-dd46-4f3a-94a7-a3b2cc2cff23","Type":"ContainerDied","Data":"96d8a946644824c210c0e06e131a48aca7d7799c0bd4e1722fcc63eb0a20821b"}
Nov 22 08:00:04 crc kubenswrapper[4929]: I1122 08:00:04.023265 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52"
Nov 22 08:00:04 crc kubenswrapper[4929]: I1122 08:00:04.164126 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec23b237-dd46-4f3a-94a7-a3b2cc2cff23-secret-volume\") pod \"ec23b237-dd46-4f3a-94a7-a3b2cc2cff23\" (UID: \"ec23b237-dd46-4f3a-94a7-a3b2cc2cff23\") "
Nov 22 08:00:04 crc kubenswrapper[4929]: I1122 08:00:04.164364 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2tnpq\" (UniqueName: \"kubernetes.io/projected/ec23b237-dd46-4f3a-94a7-a3b2cc2cff23-kube-api-access-2tnpq\") pod \"ec23b237-dd46-4f3a-94a7-a3b2cc2cff23\" (UID: \"ec23b237-dd46-4f3a-94a7-a3b2cc2cff23\") "
Nov 22 08:00:04 crc kubenswrapper[4929]: I1122 08:00:04.164415 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec23b237-dd46-4f3a-94a7-a3b2cc2cff23-config-volume\") pod \"ec23b237-dd46-4f3a-94a7-a3b2cc2cff23\" (UID: \"ec23b237-dd46-4f3a-94a7-a3b2cc2cff23\") "
Nov 22 08:00:04 crc kubenswrapper[4929]: I1122 08:00:04.165362 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec23b237-dd46-4f3a-94a7-a3b2cc2cff23-config-volume" (OuterVolumeSpecName: "config-volume") pod "ec23b237-dd46-4f3a-94a7-a3b2cc2cff23" (UID: "ec23b237-dd46-4f3a-94a7-a3b2cc2cff23"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 08:00:04 crc kubenswrapper[4929]: I1122 08:00:04.171685 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec23b237-dd46-4f3a-94a7-a3b2cc2cff23-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ec23b237-dd46-4f3a-94a7-a3b2cc2cff23" (UID: "ec23b237-dd46-4f3a-94a7-a3b2cc2cff23"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 08:00:04 crc kubenswrapper[4929]: I1122 08:00:04.172467 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec23b237-dd46-4f3a-94a7-a3b2cc2cff23-kube-api-access-2tnpq" (OuterVolumeSpecName: "kube-api-access-2tnpq") pod "ec23b237-dd46-4f3a-94a7-a3b2cc2cff23" (UID: "ec23b237-dd46-4f3a-94a7-a3b2cc2cff23"). InnerVolumeSpecName "kube-api-access-2tnpq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 08:00:04 crc kubenswrapper[4929]: I1122 08:00:04.267931 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2tnpq\" (UniqueName: \"kubernetes.io/projected/ec23b237-dd46-4f3a-94a7-a3b2cc2cff23-kube-api-access-2tnpq\") on node \"crc\" DevicePath \"\""
Nov 22 08:00:04 crc kubenswrapper[4929]: I1122 08:00:04.268414 4929 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec23b237-dd46-4f3a-94a7-a3b2cc2cff23-config-volume\") on node \"crc\" DevicePath \"\""
Nov 22 08:00:04 crc kubenswrapper[4929]: I1122 08:00:04.268423 4929 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec23b237-dd46-4f3a-94a7-a3b2cc2cff23-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 22 08:00:04 crc kubenswrapper[4929]: I1122 08:00:04.681444 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52" event={"ID":"ec23b237-dd46-4f3a-94a7-a3b2cc2cff23","Type":"ContainerDied","Data":"69a0b2e13fff9967d7fd5686b7789aae8d1eba99a138112d72d455e2c0e79c32"}
Nov 22 08:00:04 crc kubenswrapper[4929]: I1122 08:00:04.681494 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396640-6tw52"
Nov 22 08:00:04 crc kubenswrapper[4929]: I1122 08:00:04.681495 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="69a0b2e13fff9967d7fd5686b7789aae8d1eba99a138112d72d455e2c0e79c32"
Nov 22 08:00:04 crc kubenswrapper[4929]: I1122 08:00:04.940372 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-r5nrq"]
Nov 22 08:00:04 crc kubenswrapper[4929]: E1122 08:00:04.940797 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec23b237-dd46-4f3a-94a7-a3b2cc2cff23" containerName="collect-profiles"
Nov 22 08:00:04 crc kubenswrapper[4929]: I1122 08:00:04.940816 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec23b237-dd46-4f3a-94a7-a3b2cc2cff23" containerName="collect-profiles"
Nov 22 08:00:04 crc kubenswrapper[4929]: I1122 08:00:04.940983 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec23b237-dd46-4f3a-94a7-a3b2cc2cff23" containerName="collect-profiles"
Nov 22 08:00:04 crc kubenswrapper[4929]: I1122 08:00:04.942463 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r5nrq"
Nov 22 08:00:04 crc kubenswrapper[4929]: I1122 08:00:04.958599 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r5nrq"]
Nov 22 08:00:05 crc kubenswrapper[4929]: I1122 08:00:05.084104 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32281267-0743-45da-9249-ce9bfee17bd0-catalog-content\") pod \"certified-operators-r5nrq\" (UID: \"32281267-0743-45da-9249-ce9bfee17bd0\") " pod="openshift-marketplace/certified-operators-r5nrq"
Nov 22 08:00:05 crc kubenswrapper[4929]: I1122 08:00:05.084230 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fn8w4\" (UniqueName: \"kubernetes.io/projected/32281267-0743-45da-9249-ce9bfee17bd0-kube-api-access-fn8w4\") pod \"certified-operators-r5nrq\" (UID: \"32281267-0743-45da-9249-ce9bfee17bd0\") " pod="openshift-marketplace/certified-operators-r5nrq"
Nov 22 08:00:05 crc kubenswrapper[4929]: I1122 08:00:05.084484 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32281267-0743-45da-9249-ce9bfee17bd0-utilities\") pod \"certified-operators-r5nrq\" (UID: \"32281267-0743-45da-9249-ce9bfee17bd0\") " pod="openshift-marketplace/certified-operators-r5nrq"
Nov 22 08:00:05 crc kubenswrapper[4929]: I1122 08:00:05.097307 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr"]
Nov 22 08:00:05 crc kubenswrapper[4929]: I1122 08:00:05.103592 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396595-57zrr"]
Nov 22 08:00:05 crc kubenswrapper[4929]: I1122 08:00:05.186562 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32281267-0743-45da-9249-ce9bfee17bd0-utilities\") pod \"certified-operators-r5nrq\" (UID: \"32281267-0743-45da-9249-ce9bfee17bd0\") " pod="openshift-marketplace/certified-operators-r5nrq"
Nov 22 08:00:05 crc kubenswrapper[4929]: I1122 08:00:05.186685 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32281267-0743-45da-9249-ce9bfee17bd0-catalog-content\") pod \"certified-operators-r5nrq\" (UID: \"32281267-0743-45da-9249-ce9bfee17bd0\") " pod="openshift-marketplace/certified-operators-r5nrq"
Nov 22 08:00:05 crc kubenswrapper[4929]: I1122 08:00:05.186741 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fn8w4\" (UniqueName: \"kubernetes.io/projected/32281267-0743-45da-9249-ce9bfee17bd0-kube-api-access-fn8w4\") pod \"certified-operators-r5nrq\" (UID: \"32281267-0743-45da-9249-ce9bfee17bd0\") " pod="openshift-marketplace/certified-operators-r5nrq"
Nov 22 08:00:05 crc kubenswrapper[4929]: I1122 08:00:05.187115 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32281267-0743-45da-9249-ce9bfee17bd0-utilities\") pod \"certified-operators-r5nrq\" (UID: \"32281267-0743-45da-9249-ce9bfee17bd0\") " pod="openshift-marketplace/certified-operators-r5nrq"
Nov 22 08:00:05 crc kubenswrapper[4929]: I1122 08:00:05.187244 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32281267-0743-45da-9249-ce9bfee17bd0-catalog-content\") pod \"certified-operators-r5nrq\" (UID: \"32281267-0743-45da-9249-ce9bfee17bd0\") " pod="openshift-marketplace/certified-operators-r5nrq"
Nov 22 08:00:05 crc kubenswrapper[4929]: I1122 08:00:05.210329 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fn8w4\" (UniqueName: \"kubernetes.io/projected/32281267-0743-45da-9249-ce9bfee17bd0-kube-api-access-fn8w4\") pod \"certified-operators-r5nrq\" (UID: \"32281267-0743-45da-9249-ce9bfee17bd0\") " pod="openshift-marketplace/certified-operators-r5nrq"
Nov 22 08:00:05 crc kubenswrapper[4929]: I1122 08:00:05.281546 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r5nrq"
Nov 22 08:00:05 crc kubenswrapper[4929]: I1122 08:00:05.624133 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r5nrq"]
Nov 22 08:00:05 crc kubenswrapper[4929]: W1122 08:00:05.630153 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod32281267_0743_45da_9249_ce9bfee17bd0.slice/crio-ada235a5a03ecac82a587ef775c9faf1065c59099314edd83559948c3983ac8a WatchSource:0}: Error finding container ada235a5a03ecac82a587ef775c9faf1065c59099314edd83559948c3983ac8a: Status 404 returned error can't find the container with id ada235a5a03ecac82a587ef775c9faf1065c59099314edd83559948c3983ac8a
Nov 22 08:00:05 crc kubenswrapper[4929]: I1122 08:00:05.691411 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5nrq" event={"ID":"32281267-0743-45da-9249-ce9bfee17bd0","Type":"ContainerStarted","Data":"ada235a5a03ecac82a587ef775c9faf1065c59099314edd83559948c3983ac8a"}
Nov 22 08:00:05 crc kubenswrapper[4929]: I1122 08:00:05.966606 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e73fb56-aba3-457d-813f-2087e73a8ea3" path="/var/lib/kubelet/pods/6e73fb56-aba3-457d-813f-2087e73a8ea3/volumes"
Nov 22 08:00:06 crc kubenswrapper[4929]: I1122 08:00:06.707622 4929 generic.go:334] "Generic (PLEG): container finished" podID="32281267-0743-45da-9249-ce9bfee17bd0" containerID="84f9f1c35f10a2958f5931adecf0660deba76d33196e95ad9af570df2bf612f3" exitCode=0
Nov 22 08:00:06 crc kubenswrapper[4929]: I1122 08:00:06.707685 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5nrq" event={"ID":"32281267-0743-45da-9249-ce9bfee17bd0","Type":"ContainerDied","Data":"84f9f1c35f10a2958f5931adecf0660deba76d33196e95ad9af570df2bf612f3"}
Nov 22 08:00:06 crc kubenswrapper[4929]: I1122 08:00:06.712490 4929 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 22 08:00:08 crc kubenswrapper[4929]: I1122 08:00:08.730532 4929 generic.go:334] "Generic (PLEG): container finished" podID="32281267-0743-45da-9249-ce9bfee17bd0" containerID="ca89836a8af43866752dbd81f549c168211949ed4aed22a0fc856144ec7e9197" exitCode=0
Nov 22 08:00:08 crc kubenswrapper[4929]: I1122 08:00:08.730574 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5nrq" event={"ID":"32281267-0743-45da-9249-ce9bfee17bd0","Type":"ContainerDied","Data":"ca89836a8af43866752dbd81f549c168211949ed4aed22a0fc856144ec7e9197"}
Nov 22 08:00:10 crc kubenswrapper[4929]: I1122 08:00:10.762544 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5nrq" event={"ID":"32281267-0743-45da-9249-ce9bfee17bd0","Type":"ContainerStarted","Data":"1219ce4aabfc847355138a1b41038f8de39c771d9c86a03ec929a438847c94a7"}
Nov 22 08:00:10 crc kubenswrapper[4929]: I1122 08:00:10.796040 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-r5nrq" podStartSLOduration=4.081646598 podStartE2EDuration="6.796021709s" podCreationTimestamp="2025-11-22 08:00:04 +0000 UTC" firstStartedPulling="2025-11-22 08:00:06.711798597 +0000 UTC m=+2943.821252650" lastFinishedPulling="2025-11-22 08:00:09.426173748 +0000 UTC m=+2946.535627761" observedRunningTime="2025-11-22 08:00:10.785320733 +0000 UTC m=+2947.894774766" watchObservedRunningTime="2025-11-22 08:00:10.796021709 +0000 UTC m=+2947.905475722"
Nov 22 08:00:12 crc kubenswrapper[4929]: I1122 08:00:12.777248 4929 scope.go:117] "RemoveContainer" containerID="570540a0454e39cd47d427b163de64205e045651fbca02789386d4cce9f16aa3"
Nov 22 08:00:12 crc kubenswrapper[4929]: I1122 08:00:12.809344 4929 scope.go:117] "RemoveContainer" containerID="03a64f9e9e270bb3aaf7560a7bb580832c2b0538b9c46c59d8f4bc575b7cc191"
Nov 22 08:00:12 crc kubenswrapper[4929]: I1122 08:00:12.834542 4929 scope.go:117] "RemoveContainer" containerID="112dd1cde7025f6cd26eb94cb06ec11ed38cf7854b5aea3de0630122e7ebf6e0"
Nov 22 08:00:12 crc kubenswrapper[4929]: I1122 08:00:12.860835 4929 scope.go:117] "RemoveContainer" containerID="8843328384b040163cb90d2dd7fc8fea147e7308e41aa125aa370f97494c059e"
Nov 22 08:00:12 crc kubenswrapper[4929]: I1122 08:00:12.920781 4929 scope.go:117] "RemoveContainer" containerID="c5003a790fe5d9814ac1325156007fa0bfe6d0a83b3b82189fdf62ffa0b95a4f"
Nov 22 08:00:15 crc kubenswrapper[4929]: I1122 08:00:15.282484 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-r5nrq"
Nov 22 08:00:15 crc kubenswrapper[4929]: I1122 08:00:15.284610 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-r5nrq"
Nov 22 08:00:15 crc kubenswrapper[4929]: I1122 08:00:15.338471 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-r5nrq"
Nov 22 08:00:15 crc kubenswrapper[4929]: I1122 08:00:15.873348 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-r5nrq"
Nov 22 08:00:15 crc kubenswrapper[4929]: I1122 08:00:15.926194 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r5nrq"]
Nov 22 08:00:17 crc kubenswrapper[4929]: I1122 08:00:17.835762 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-r5nrq" podUID="32281267-0743-45da-9249-ce9bfee17bd0" containerName="registry-server" containerID="cri-o://1219ce4aabfc847355138a1b41038f8de39c771d9c86a03ec929a438847c94a7" gracePeriod=2
Nov 22 08:00:18 crc kubenswrapper[4929]: I1122 08:00:18.594379 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 08:00:18 crc kubenswrapper[4929]: I1122 08:00:18.594964 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 08:00:18 crc kubenswrapper[4929]: I1122 08:00:18.595035 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dssfx"
Nov 22 08:00:18 crc kubenswrapper[4929]: I1122 08:00:18.596096 4929 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1fd8dd54949c05dad9f35e5eb488d84888297acdf4612e470c54032ca755c997"} pod="openshift-machine-config-operator/machine-config-daemon-dssfx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 22 08:00:18 crc kubenswrapper[4929]: I1122 08:00:18.596164 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" containerID="cri-o://1fd8dd54949c05dad9f35e5eb488d84888297acdf4612e470c54032ca755c997" gracePeriod=600
Nov 22 08:00:18 crc kubenswrapper[4929]: I1122 08:00:18.850348 4929 generic.go:334] "Generic (PLEG): container finished" podID="470531cb-120c-48d9-80e1-adf074cf3055" containerID="1fd8dd54949c05dad9f35e5eb488d84888297acdf4612e470c54032ca755c997" exitCode=0
Nov 22 08:00:18 crc kubenswrapper[4929]: I1122 08:00:18.850447 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerDied","Data":"1fd8dd54949c05dad9f35e5eb488d84888297acdf4612e470c54032ca755c997"}
Nov 22 08:00:18 crc kubenswrapper[4929]: I1122 08:00:18.851368 4929 scope.go:117] "RemoveContainer" containerID="b5f322ba595d56d3148e95b22c7629e77472c7f36c95ac972819de1865aaf84c"
Nov 22 08:00:18 crc kubenswrapper[4929]: I1122 08:00:18.856675 4929 generic.go:334] "Generic (PLEG): container finished" podID="32281267-0743-45da-9249-ce9bfee17bd0" containerID="1219ce4aabfc847355138a1b41038f8de39c771d9c86a03ec929a438847c94a7" exitCode=0
Nov 22 08:00:18 crc kubenswrapper[4929]: I1122 08:00:18.856729 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5nrq" event={"ID":"32281267-0743-45da-9249-ce9bfee17bd0","Type":"ContainerDied","Data":"1219ce4aabfc847355138a1b41038f8de39c771d9c86a03ec929a438847c94a7"}
Nov 22 08:00:18 crc kubenswrapper[4929]: I1122 08:00:18.856816 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r5nrq" event={"ID":"32281267-0743-45da-9249-ce9bfee17bd0","Type":"ContainerDied","Data":"ada235a5a03ecac82a587ef775c9faf1065c59099314edd83559948c3983ac8a"}
Nov 22 08:00:18 crc kubenswrapper[4929]: I1122 08:00:18.856874 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ada235a5a03ecac82a587ef775c9faf1065c59099314edd83559948c3983ac8a"
Nov 22 08:00:18 crc kubenswrapper[4929]: I1122 08:00:18.889464 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r5nrq"
Nov 22 08:00:19 crc kubenswrapper[4929]: I1122 08:00:19.061173 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32281267-0743-45da-9249-ce9bfee17bd0-utilities\") pod \"32281267-0743-45da-9249-ce9bfee17bd0\" (UID: \"32281267-0743-45da-9249-ce9bfee17bd0\") "
Nov 22 08:00:19 crc kubenswrapper[4929]: I1122 08:00:19.061299 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fn8w4\" (UniqueName: \"kubernetes.io/projected/32281267-0743-45da-9249-ce9bfee17bd0-kube-api-access-fn8w4\") pod \"32281267-0743-45da-9249-ce9bfee17bd0\" (UID: \"32281267-0743-45da-9249-ce9bfee17bd0\") "
Nov 22 08:00:19 crc kubenswrapper[4929]: I1122 08:00:19.061348 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32281267-0743-45da-9249-ce9bfee17bd0-catalog-content\") pod \"32281267-0743-45da-9249-ce9bfee17bd0\" (UID: \"32281267-0743-45da-9249-ce9bfee17bd0\") "
Nov 22 08:00:19 crc kubenswrapper[4929]: I1122 08:00:19.078489 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32281267-0743-45da-9249-ce9bfee17bd0-utilities" (OuterVolumeSpecName: "utilities") pod "32281267-0743-45da-9249-ce9bfee17bd0" (UID: "32281267-0743-45da-9249-ce9bfee17bd0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 08:00:19 crc kubenswrapper[4929]: I1122 08:00:19.082959 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32281267-0743-45da-9249-ce9bfee17bd0-kube-api-access-fn8w4" (OuterVolumeSpecName: "kube-api-access-fn8w4") pod "32281267-0743-45da-9249-ce9bfee17bd0" (UID: "32281267-0743-45da-9249-ce9bfee17bd0"). InnerVolumeSpecName "kube-api-access-fn8w4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 08:00:19 crc kubenswrapper[4929]: I1122 08:00:19.142560 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32281267-0743-45da-9249-ce9bfee17bd0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "32281267-0743-45da-9249-ce9bfee17bd0" (UID: "32281267-0743-45da-9249-ce9bfee17bd0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 08:00:19 crc kubenswrapper[4929]: I1122 08:00:19.165714 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32281267-0743-45da-9249-ce9bfee17bd0-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 08:00:19 crc kubenswrapper[4929]: I1122 08:00:19.165802 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fn8w4\" (UniqueName: \"kubernetes.io/projected/32281267-0743-45da-9249-ce9bfee17bd0-kube-api-access-fn8w4\") on node \"crc\" DevicePath \"\""
Nov 22 08:00:19 crc kubenswrapper[4929]: I1122 08:00:19.165835 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32281267-0743-45da-9249-ce9bfee17bd0-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 08:00:19 crc kubenswrapper[4929]: I1122 08:00:19.870277 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531"}
Nov 22 08:00:19 crc kubenswrapper[4929]: I1122 08:00:19.870374 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r5nrq"
Nov 22 08:00:19 crc kubenswrapper[4929]: I1122 08:00:19.920053 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r5nrq"]
Nov 22 08:00:19 crc kubenswrapper[4929]: I1122 08:00:19.928492 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-r5nrq"]
Nov 22 08:00:19 crc kubenswrapper[4929]: I1122 08:00:19.959786 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32281267-0743-45da-9249-ce9bfee17bd0" path="/var/lib/kubelet/pods/32281267-0743-45da-9249-ce9bfee17bd0/volumes"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.163370 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29396641-jdmjw"]
Nov 22 08:01:00 crc kubenswrapper[4929]: E1122 08:01:00.164931 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32281267-0743-45da-9249-ce9bfee17bd0" containerName="registry-server"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.164965 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="32281267-0743-45da-9249-ce9bfee17bd0" containerName="registry-server"
Nov 22 08:01:00 crc kubenswrapper[4929]: E1122 08:01:00.165018 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32281267-0743-45da-9249-ce9bfee17bd0" containerName="extract-content"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.165034 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="32281267-0743-45da-9249-ce9bfee17bd0" containerName="extract-content"
Nov 22 08:01:00 crc kubenswrapper[4929]: E1122 08:01:00.165067 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32281267-0743-45da-9249-ce9bfee17bd0" containerName="extract-utilities"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.165083 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="32281267-0743-45da-9249-ce9bfee17bd0" containerName="extract-utilities"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.165581 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="32281267-0743-45da-9249-ce9bfee17bd0" containerName="registry-server"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.167290 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29396641-jdmjw"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.177632 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29396641-jdmjw"]
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.336001 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-fernet-keys\") pod \"keystone-cron-29396641-jdmjw\" (UID: \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\") " pod="openstack/keystone-cron-29396641-jdmjw"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.336419 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-combined-ca-bundle\") pod \"keystone-cron-29396641-jdmjw\" (UID: \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\") " pod="openstack/keystone-cron-29396641-jdmjw"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.338425 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-config-data\") pod \"keystone-cron-29396641-jdmjw\" (UID: \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\") " pod="openstack/keystone-cron-29396641-jdmjw"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.338476 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqslw\" (UniqueName: \"kubernetes.io/projected/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-kube-api-access-gqslw\") pod \"keystone-cron-29396641-jdmjw\" (UID: \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\") " pod="openstack/keystone-cron-29396641-jdmjw"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.440114 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-fernet-keys\") pod \"keystone-cron-29396641-jdmjw\" (UID: \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\") " pod="openstack/keystone-cron-29396641-jdmjw"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.440193 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-combined-ca-bundle\") pod \"keystone-cron-29396641-jdmjw\" (UID: \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\") " pod="openstack/keystone-cron-29396641-jdmjw"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.440318 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-config-data\") pod \"keystone-cron-29396641-jdmjw\" (UID: \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\") " pod="openstack/keystone-cron-29396641-jdmjw"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.440348 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqslw\" (UniqueName: \"kubernetes.io/projected/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-kube-api-access-gqslw\") pod \"keystone-cron-29396641-jdmjw\" (UID: \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\") " pod="openstack/keystone-cron-29396641-jdmjw"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.445641 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-fernet-keys\") pod \"keystone-cron-29396641-jdmjw\" (UID: \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\") " pod="openstack/keystone-cron-29396641-jdmjw"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.445759 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-config-data\") pod \"keystone-cron-29396641-jdmjw\" (UID: \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\") " pod="openstack/keystone-cron-29396641-jdmjw"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.458593 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-combined-ca-bundle\") pod \"keystone-cron-29396641-jdmjw\" (UID: \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\") " pod="openstack/keystone-cron-29396641-jdmjw"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.468624 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqslw\" (UniqueName: \"kubernetes.io/projected/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-kube-api-access-gqslw\") pod \"keystone-cron-29396641-jdmjw\" (UID: \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\") " pod="openstack/keystone-cron-29396641-jdmjw"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.488109 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29396641-jdmjw"
Nov 22 08:01:00 crc kubenswrapper[4929]: I1122 08:01:00.958923 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29396641-jdmjw"]
Nov 22 08:01:00 crc kubenswrapper[4929]: W1122 08:01:00.971568 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9c6000c_4848_4dcb_a01e_2f3a33a1810f.slice/crio-3c5072206d53d6b9dd05816f3b7ecbe256bcc262751c72cff010bb12fc8ab671 WatchSource:0}: Error finding container 3c5072206d53d6b9dd05816f3b7ecbe256bcc262751c72cff010bb12fc8ab671: Status 404 returned error can't find the container with id 3c5072206d53d6b9dd05816f3b7ecbe256bcc262751c72cff010bb12fc8ab671
Nov 22 08:01:01 crc kubenswrapper[4929]: I1122 08:01:01.340562 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29396641-jdmjw" event={"ID":"c9c6000c-4848-4dcb-a01e-2f3a33a1810f","Type":"ContainerStarted","Data":"3c5072206d53d6b9dd05816f3b7ecbe256bcc262751c72cff010bb12fc8ab671"}
Nov 22 08:01:02 crc kubenswrapper[4929]: I1122 08:01:02.350721 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29396641-jdmjw" event={"ID":"c9c6000c-4848-4dcb-a01e-2f3a33a1810f","Type":"ContainerStarted","Data":"eb363074d542ca99b85e167599aaf1a4950f3fef82c9d394801a5f7b385a3e61"}
Nov 22 08:01:04 crc kubenswrapper[4929]: I1122 08:01:04.375578 4929 generic.go:334] "Generic (PLEG): container finished" podID="c9c6000c-4848-4dcb-a01e-2f3a33a1810f" containerID="eb363074d542ca99b85e167599aaf1a4950f3fef82c9d394801a5f7b385a3e61" exitCode=0
Nov 22 08:01:04 crc kubenswrapper[4929]: I1122 08:01:04.376271 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29396641-jdmjw" event={"ID":"c9c6000c-4848-4dcb-a01e-2f3a33a1810f","Type":"ContainerDied","Data":"eb363074d542ca99b85e167599aaf1a4950f3fef82c9d394801a5f7b385a3e61"}
Nov 22 08:01:05 crc kubenswrapper[4929]: I1122 08:01:05.765815 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29396641-jdmjw"
Nov 22 08:01:05 crc kubenswrapper[4929]: I1122 08:01:05.866812 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-fernet-keys\") pod \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\" (UID: \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\") "
Nov 22 08:01:05 crc kubenswrapper[4929]: I1122 08:01:05.866930 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-config-data\") pod \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\" (UID: \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\") "
Nov 22 08:01:05 crc kubenswrapper[4929]: I1122 08:01:05.867027 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-combined-ca-bundle\") pod \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\" (UID: \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\") "
Nov 22 08:01:05 crc kubenswrapper[4929]: I1122 08:01:05.867051 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqslw\" (UniqueName: \"kubernetes.io/projected/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-kube-api-access-gqslw\") pod \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\" (UID: \"c9c6000c-4848-4dcb-a01e-2f3a33a1810f\") "
Nov 22 08:01:05 crc kubenswrapper[4929]: I1122 08:01:05.872706 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "c9c6000c-4848-4dcb-a01e-2f3a33a1810f" (UID: "c9c6000c-4848-4dcb-a01e-2f3a33a1810f"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 08:01:05 crc kubenswrapper[4929]: I1122 08:01:05.879979 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-kube-api-access-gqslw" (OuterVolumeSpecName: "kube-api-access-gqslw") pod "c9c6000c-4848-4dcb-a01e-2f3a33a1810f" (UID: "c9c6000c-4848-4dcb-a01e-2f3a33a1810f"). InnerVolumeSpecName "kube-api-access-gqslw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 08:01:05 crc kubenswrapper[4929]: I1122 08:01:05.897952 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c9c6000c-4848-4dcb-a01e-2f3a33a1810f" (UID: "c9c6000c-4848-4dcb-a01e-2f3a33a1810f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 08:01:05 crc kubenswrapper[4929]: I1122 08:01:05.924869 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-config-data" (OuterVolumeSpecName: "config-data") pod "c9c6000c-4848-4dcb-a01e-2f3a33a1810f" (UID: "c9c6000c-4848-4dcb-a01e-2f3a33a1810f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 08:01:05 crc kubenswrapper[4929]: I1122 08:01:05.969385 4929 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-fernet-keys\") on node \"crc\" DevicePath \"\""
Nov 22 08:01:05 crc kubenswrapper[4929]: I1122 08:01:05.969424 4929 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 08:01:05 crc kubenswrapper[4929]: I1122 08:01:05.969436 4929 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 08:01:05 crc kubenswrapper[4929]: I1122 08:01:05.969449 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqslw\" (UniqueName: \"kubernetes.io/projected/c9c6000c-4848-4dcb-a01e-2f3a33a1810f-kube-api-access-gqslw\") on node \"crc\" DevicePath \"\""
Nov 22 08:01:06 crc kubenswrapper[4929]: I1122 08:01:06.401021 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29396641-jdmjw" event={"ID":"c9c6000c-4848-4dcb-a01e-2f3a33a1810f","Type":"ContainerDied","Data":"3c5072206d53d6b9dd05816f3b7ecbe256bcc262751c72cff010bb12fc8ab671"}
Nov 22 08:01:06 crc kubenswrapper[4929]: I1122 08:01:06.401086 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3c5072206d53d6b9dd05816f3b7ecbe256bcc262751c72cff010bb12fc8ab671"
Nov 22 08:01:06 crc kubenswrapper[4929]: I1122 08:01:06.401103 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29396641-jdmjw"
Nov 22 08:01:19 crc kubenswrapper[4929]: I1122 08:01:19.889811 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-w5fsw"]
Nov 22 08:01:19 crc kubenswrapper[4929]: E1122 08:01:19.892733 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9c6000c-4848-4dcb-a01e-2f3a33a1810f" containerName="keystone-cron"
Nov 22 08:01:19 crc kubenswrapper[4929]: I1122 08:01:19.892756 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9c6000c-4848-4dcb-a01e-2f3a33a1810f" containerName="keystone-cron"
Nov 22 08:01:19 crc kubenswrapper[4929]: I1122 08:01:19.893014 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9c6000c-4848-4dcb-a01e-2f3a33a1810f" containerName="keystone-cron"
Nov 22 08:01:19 crc kubenswrapper[4929]: I1122 08:01:19.894805 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-w5fsw"
Nov 22 08:01:19 crc kubenswrapper[4929]: I1122 08:01:19.918530 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-w5fsw"]
Nov 22 08:01:19 crc kubenswrapper[4929]: I1122 08:01:19.969828 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63a57dbe-0ede-40a4-bcdf-b59556f2d59e-utilities\") pod \"community-operators-w5fsw\" (UID: \"63a57dbe-0ede-40a4-bcdf-b59556f2d59e\") " pod="openshift-marketplace/community-operators-w5fsw"
Nov 22 08:01:19 crc kubenswrapper[4929]: I1122 08:01:19.969985 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c44ts\" (UniqueName: \"kubernetes.io/projected/63a57dbe-0ede-40a4-bcdf-b59556f2d59e-kube-api-access-c44ts\") pod \"community-operators-w5fsw\" (UID: \"63a57dbe-0ede-40a4-bcdf-b59556f2d59e\") " pod="openshift-marketplace/community-operators-w5fsw"
Nov 22 08:01:19 crc kubenswrapper[4929]: I1122 08:01:19.970194 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63a57dbe-0ede-40a4-bcdf-b59556f2d59e-catalog-content\") pod \"community-operators-w5fsw\" (UID: \"63a57dbe-0ede-40a4-bcdf-b59556f2d59e\") " pod="openshift-marketplace/community-operators-w5fsw"
Nov 22 08:01:20 crc kubenswrapper[4929]: I1122 08:01:20.072773 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63a57dbe-0ede-40a4-bcdf-b59556f2d59e-utilities\") pod \"community-operators-w5fsw\" (UID: \"63a57dbe-0ede-40a4-bcdf-b59556f2d59e\") " pod="openshift-marketplace/community-operators-w5fsw"
Nov 22 08:01:20 crc kubenswrapper[4929]: I1122 08:01:20.072835 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c44ts\" (UniqueName: \"kubernetes.io/projected/63a57dbe-0ede-40a4-bcdf-b59556f2d59e-kube-api-access-c44ts\") pod \"community-operators-w5fsw\" (UID: \"63a57dbe-0ede-40a4-bcdf-b59556f2d59e\") " pod="openshift-marketplace/community-operators-w5fsw"
Nov 22 08:01:20 crc kubenswrapper[4929]: I1122 08:01:20.072985 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63a57dbe-0ede-40a4-bcdf-b59556f2d59e-catalog-content\") pod \"community-operators-w5fsw\" (UID: \"63a57dbe-0ede-40a4-bcdf-b59556f2d59e\") " pod="openshift-marketplace/community-operators-w5fsw"
Nov 22 08:01:20 crc kubenswrapper[4929]: I1122 08:01:20.073816 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63a57dbe-0ede-40a4-bcdf-b59556f2d59e-utilities\") pod \"community-operators-w5fsw\" (UID: \"63a57dbe-0ede-40a4-bcdf-b59556f2d59e\") " pod="openshift-marketplace/community-operators-w5fsw"
Nov 22 08:01:20 crc kubenswrapper[4929]: I1122 08:01:20.074028 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63a57dbe-0ede-40a4-bcdf-b59556f2d59e-catalog-content\") pod \"community-operators-w5fsw\" (UID: \"63a57dbe-0ede-40a4-bcdf-b59556f2d59e\") " pod="openshift-marketplace/community-operators-w5fsw"
Nov 22 08:01:20 crc kubenswrapper[4929]: I1122 08:01:20.097047 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c44ts\" (UniqueName: \"kubernetes.io/projected/63a57dbe-0ede-40a4-bcdf-b59556f2d59e-kube-api-access-c44ts\") pod \"community-operators-w5fsw\" (UID: \"63a57dbe-0ede-40a4-bcdf-b59556f2d59e\") " pod="openshift-marketplace/community-operators-w5fsw"
Nov 22 08:01:20 crc kubenswrapper[4929]: I1122 08:01:20.231444 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-w5fsw"
Nov 22 08:01:20 crc kubenswrapper[4929]: I1122 08:01:20.825604 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-w5fsw"]
Nov 22 08:01:21 crc kubenswrapper[4929]: I1122 08:01:21.545710 4929 generic.go:334] "Generic (PLEG): container finished" podID="63a57dbe-0ede-40a4-bcdf-b59556f2d59e" containerID="af5400e19a6727c9353bdf3461e57fd052f050ad0da9dee688f918b293800f4c" exitCode=0
Nov 22 08:01:21 crc kubenswrapper[4929]: I1122 08:01:21.546005 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w5fsw" event={"ID":"63a57dbe-0ede-40a4-bcdf-b59556f2d59e","Type":"ContainerDied","Data":"af5400e19a6727c9353bdf3461e57fd052f050ad0da9dee688f918b293800f4c"}
Nov 22 08:01:21 crc kubenswrapper[4929]: I1122 08:01:21.546032 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w5fsw" event={"ID":"63a57dbe-0ede-40a4-bcdf-b59556f2d59e","Type":"ContainerStarted","Data":"41ea50a89b847a0f1726ff5acfb56406a264df216db0daedf93ac151b832e18b"}
Nov 22 08:01:25 crc kubenswrapper[4929]: I1122 08:01:25.588863 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w5fsw" event={"ID":"63a57dbe-0ede-40a4-bcdf-b59556f2d59e","Type":"ContainerStarted","Data":"14f908a6a06b2f80e7914e37f2fb2c745e5987bb0a9756ae549e53b5b7209bdc"}
Nov 22 08:01:28 crc kubenswrapper[4929]: I1122 08:01:28.636943 4929 generic.go:334] "Generic (PLEG): container finished" podID="63a57dbe-0ede-40a4-bcdf-b59556f2d59e" containerID="14f908a6a06b2f80e7914e37f2fb2c745e5987bb0a9756ae549e53b5b7209bdc" exitCode=0
Nov 22 08:01:28 crc kubenswrapper[4929]: I1122 08:01:28.637080 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w5fsw" event={"ID":"63a57dbe-0ede-40a4-bcdf-b59556f2d59e","Type":"ContainerDied","Data":"14f908a6a06b2f80e7914e37f2fb2c745e5987bb0a9756ae549e53b5b7209bdc"}
Nov 22 08:01:31 crc kubenswrapper[4929]: I1122 08:01:31.673435 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w5fsw" event={"ID":"63a57dbe-0ede-40a4-bcdf-b59556f2d59e","Type":"ContainerStarted","Data":"b16dbf5663733183c31f3b26ac0991ad9df2fa0fbaa153439984619820c7f278"}
Nov 22 08:01:31 crc kubenswrapper[4929]: I1122 08:01:31.716898 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-w5fsw" podStartSLOduration=5.078963044 podStartE2EDuration="12.716877828s" podCreationTimestamp="2025-11-22 08:01:19 +0000 UTC" firstStartedPulling="2025-11-22 08:01:22.556793218 +0000 UTC m=+3019.666247231" lastFinishedPulling="2025-11-22 08:01:30.194707992 +0000 UTC m=+3027.304162015" observedRunningTime="2025-11-22 08:01:31.71173476 +0000 UTC m=+3028.821188773" watchObservedRunningTime="2025-11-22 08:01:31.716877828 +0000 UTC m=+3028.826331841"
Nov 22 08:01:32 crc kubenswrapper[4929]: I1122 08:01:32.035720 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-kszbt"]
Nov 22 08:01:32 crc kubenswrapper[4929]: I1122 08:01:32.038116 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kszbt"
Nov 22 08:01:32 crc kubenswrapper[4929]: I1122 08:01:32.067615 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kszbt"]
Nov 22 08:01:32 crc kubenswrapper[4929]: I1122 08:01:32.147391 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92167854-bed9-49d3-a046-1585524bd8ee-catalog-content\") pod \"redhat-operators-kszbt\" (UID: \"92167854-bed9-49d3-a046-1585524bd8ee\") " pod="openshift-marketplace/redhat-operators-kszbt"
Nov 22 08:01:32 crc kubenswrapper[4929]: I1122 08:01:32.147556 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5p8td\" (UniqueName: \"kubernetes.io/projected/92167854-bed9-49d3-a046-1585524bd8ee-kube-api-access-5p8td\") pod \"redhat-operators-kszbt\" (UID: \"92167854-bed9-49d3-a046-1585524bd8ee\") " pod="openshift-marketplace/redhat-operators-kszbt"
Nov 22 08:01:32 crc kubenswrapper[4929]: I1122 08:01:32.147736 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92167854-bed9-49d3-a046-1585524bd8ee-utilities\") pod \"redhat-operators-kszbt\" (UID: \"92167854-bed9-49d3-a046-1585524bd8ee\") " pod="openshift-marketplace/redhat-operators-kszbt"
Nov 22 08:01:32 crc kubenswrapper[4929]: I1122 08:01:32.249687 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92167854-bed9-49d3-a046-1585524bd8ee-catalog-content\") pod \"redhat-operators-kszbt\" (UID: \"92167854-bed9-49d3-a046-1585524bd8ee\") " pod="openshift-marketplace/redhat-operators-kszbt"
Nov 22 08:01:32 crc kubenswrapper[4929]: I1122 08:01:32.249789 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5p8td\" (UniqueName: \"kubernetes.io/projected/92167854-bed9-49d3-a046-1585524bd8ee-kube-api-access-5p8td\") pod \"redhat-operators-kszbt\" (UID: \"92167854-bed9-49d3-a046-1585524bd8ee\") " pod="openshift-marketplace/redhat-operators-kszbt"
Nov 22 08:01:32 crc kubenswrapper[4929]: I1122 08:01:32.249831 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92167854-bed9-49d3-a046-1585524bd8ee-utilities\") pod \"redhat-operators-kszbt\" (UID: \"92167854-bed9-49d3-a046-1585524bd8ee\") " pod="openshift-marketplace/redhat-operators-kszbt"
Nov 22 08:01:32 crc kubenswrapper[4929]: I1122 08:01:32.250440 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92167854-bed9-49d3-a046-1585524bd8ee-utilities\") pod \"redhat-operators-kszbt\" (UID: \"92167854-bed9-49d3-a046-1585524bd8ee\") " pod="openshift-marketplace/redhat-operators-kszbt"
Nov 22 08:01:32 crc kubenswrapper[4929]: I1122 08:01:32.250546 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92167854-bed9-49d3-a046-1585524bd8ee-catalog-content\") pod \"redhat-operators-kszbt\" (UID: \"92167854-bed9-49d3-a046-1585524bd8ee\") "
pod="openshift-marketplace/redhat-operators-kszbt" Nov 22 08:01:32 crc kubenswrapper[4929]: I1122 08:01:32.275546 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5p8td\" (UniqueName: \"kubernetes.io/projected/92167854-bed9-49d3-a046-1585524bd8ee-kube-api-access-5p8td\") pod \"redhat-operators-kszbt\" (UID: \"92167854-bed9-49d3-a046-1585524bd8ee\") " pod="openshift-marketplace/redhat-operators-kszbt" Nov 22 08:01:32 crc kubenswrapper[4929]: I1122 08:01:32.373056 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kszbt" Nov 22 08:01:32 crc kubenswrapper[4929]: I1122 08:01:32.906009 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kszbt"] Nov 22 08:01:33 crc kubenswrapper[4929]: I1122 08:01:33.691664 4929 generic.go:334] "Generic (PLEG): container finished" podID="92167854-bed9-49d3-a046-1585524bd8ee" containerID="c5eefeb0f553d99feb35d33ccfcf499bf1925f84d13dd1eec21585aa3402ae97" exitCode=0 Nov 22 08:01:33 crc kubenswrapper[4929]: I1122 08:01:33.691706 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kszbt" event={"ID":"92167854-bed9-49d3-a046-1585524bd8ee","Type":"ContainerDied","Data":"c5eefeb0f553d99feb35d33ccfcf499bf1925f84d13dd1eec21585aa3402ae97"} Nov 22 08:01:33 crc kubenswrapper[4929]: I1122 08:01:33.691926 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kszbt" event={"ID":"92167854-bed9-49d3-a046-1585524bd8ee","Type":"ContainerStarted","Data":"6d5cea2423c22555711307a30d3044e4d063332a0585acf39a7676a3b206590e"} Nov 22 08:01:40 crc kubenswrapper[4929]: I1122 08:01:40.232550 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-w5fsw" Nov 22 08:01:40 crc kubenswrapper[4929]: I1122 08:01:40.233447 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-w5fsw" Nov 22 08:01:40 crc kubenswrapper[4929]: I1122 08:01:40.334031 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-w5fsw" Nov 22 08:01:40 crc kubenswrapper[4929]: I1122 08:01:40.824573 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-w5fsw" Nov 22 08:01:40 crc kubenswrapper[4929]: I1122 08:01:40.883503 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-w5fsw"] Nov 22 08:01:41 crc kubenswrapper[4929]: I1122 08:01:41.785913 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kszbt" event={"ID":"92167854-bed9-49d3-a046-1585524bd8ee","Type":"ContainerStarted","Data":"aa281ab93070281fa3121544b4255c9c5047898351b01a5f317ec2b7f0c60b55"} Nov 22 08:01:42 crc kubenswrapper[4929]: I1122 08:01:42.795683 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-w5fsw" podUID="63a57dbe-0ede-40a4-bcdf-b59556f2d59e" containerName="registry-server" containerID="cri-o://b16dbf5663733183c31f3b26ac0991ad9df2fa0fbaa153439984619820c7f278" gracePeriod=2 Nov 22 08:01:44 crc kubenswrapper[4929]: I1122 08:01:44.814735 4929 generic.go:334] "Generic (PLEG): container finished" podID="92167854-bed9-49d3-a046-1585524bd8ee" 
containerID="aa281ab93070281fa3121544b4255c9c5047898351b01a5f317ec2b7f0c60b55" exitCode=0 Nov 22 08:01:44 crc kubenswrapper[4929]: I1122 08:01:44.814831 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kszbt" event={"ID":"92167854-bed9-49d3-a046-1585524bd8ee","Type":"ContainerDied","Data":"aa281ab93070281fa3121544b4255c9c5047898351b01a5f317ec2b7f0c60b55"} Nov 22 08:01:47 crc kubenswrapper[4929]: I1122 08:01:47.798933 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-w5fsw" Nov 22 08:01:47 crc kubenswrapper[4929]: I1122 08:01:47.930123 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c44ts\" (UniqueName: \"kubernetes.io/projected/63a57dbe-0ede-40a4-bcdf-b59556f2d59e-kube-api-access-c44ts\") pod \"63a57dbe-0ede-40a4-bcdf-b59556f2d59e\" (UID: \"63a57dbe-0ede-40a4-bcdf-b59556f2d59e\") " Nov 22 08:01:47 crc kubenswrapper[4929]: I1122 08:01:47.930206 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63a57dbe-0ede-40a4-bcdf-b59556f2d59e-catalog-content\") pod \"63a57dbe-0ede-40a4-bcdf-b59556f2d59e\" (UID: \"63a57dbe-0ede-40a4-bcdf-b59556f2d59e\") " Nov 22 08:01:47 crc kubenswrapper[4929]: I1122 08:01:47.930322 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63a57dbe-0ede-40a4-bcdf-b59556f2d59e-utilities\") pod \"63a57dbe-0ede-40a4-bcdf-b59556f2d59e\" (UID: \"63a57dbe-0ede-40a4-bcdf-b59556f2d59e\") " Nov 22 08:01:47 crc kubenswrapper[4929]: I1122 08:01:47.931726 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63a57dbe-0ede-40a4-bcdf-b59556f2d59e-utilities" (OuterVolumeSpecName: "utilities") pod "63a57dbe-0ede-40a4-bcdf-b59556f2d59e" (UID: "63a57dbe-0ede-40a4-bcdf-b59556f2d59e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:01:47 crc kubenswrapper[4929]: I1122 08:01:47.945791 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63a57dbe-0ede-40a4-bcdf-b59556f2d59e-kube-api-access-c44ts" (OuterVolumeSpecName: "kube-api-access-c44ts") pod "63a57dbe-0ede-40a4-bcdf-b59556f2d59e" (UID: "63a57dbe-0ede-40a4-bcdf-b59556f2d59e"). InnerVolumeSpecName "kube-api-access-c44ts". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 08:01:47 crc kubenswrapper[4929]: I1122 08:01:47.948595 4929 generic.go:334] "Generic (PLEG): container finished" podID="63a57dbe-0ede-40a4-bcdf-b59556f2d59e" containerID="b16dbf5663733183c31f3b26ac0991ad9df2fa0fbaa153439984619820c7f278" exitCode=0 Nov 22 08:01:47 crc kubenswrapper[4929]: I1122 08:01:47.983383 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63a57dbe-0ede-40a4-bcdf-b59556f2d59e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "63a57dbe-0ede-40a4-bcdf-b59556f2d59e" (UID: "63a57dbe-0ede-40a4-bcdf-b59556f2d59e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:01:48 crc kubenswrapper[4929]: I1122 08:01:48.000312 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w5fsw" event={"ID":"63a57dbe-0ede-40a4-bcdf-b59556f2d59e","Type":"ContainerDied","Data":"b16dbf5663733183c31f3b26ac0991ad9df2fa0fbaa153439984619820c7f278"} Nov 22 08:01:48 crc kubenswrapper[4929]: I1122 08:01:48.000382 4929 scope.go:117] "RemoveContainer" containerID="b16dbf5663733183c31f3b26ac0991ad9df2fa0fbaa153439984619820c7f278" Nov 22 08:01:48 crc kubenswrapper[4929]: I1122 08:01:48.033323 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c44ts\" (UniqueName: \"kubernetes.io/projected/63a57dbe-0ede-40a4-bcdf-b59556f2d59e-kube-api-access-c44ts\") on node \"crc\" DevicePath \"\"" Nov 22 08:01:48 crc kubenswrapper[4929]: I1122 08:01:48.033349 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63a57dbe-0ede-40a4-bcdf-b59556f2d59e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 08:01:48 crc kubenswrapper[4929]: I1122 08:01:48.033361 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63a57dbe-0ede-40a4-bcdf-b59556f2d59e-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 08:01:48 crc kubenswrapper[4929]: I1122 08:01:48.366039 4929 scope.go:117] "RemoveContainer" containerID="14f908a6a06b2f80e7914e37f2fb2c745e5987bb0a9756ae549e53b5b7209bdc" Nov 22 08:01:48 crc kubenswrapper[4929]: I1122 08:01:48.434021 4929 scope.go:117] "RemoveContainer" containerID="af5400e19a6727c9353bdf3461e57fd052f050ad0da9dee688f918b293800f4c" Nov 22 08:01:48 crc kubenswrapper[4929]: I1122 08:01:48.966938 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-w5fsw" Nov 22 08:01:48 crc kubenswrapper[4929]: I1122 08:01:48.966934 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w5fsw" event={"ID":"63a57dbe-0ede-40a4-bcdf-b59556f2d59e","Type":"ContainerDied","Data":"41ea50a89b847a0f1726ff5acfb56406a264df216db0daedf93ac151b832e18b"} Nov 22 08:01:48 crc kubenswrapper[4929]: I1122 08:01:48.972555 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kszbt" event={"ID":"92167854-bed9-49d3-a046-1585524bd8ee","Type":"ContainerStarted","Data":"b6252cb126e904ae5c9698ac05fab012cd63dba0c33849d562d1388431513531"} Nov 22 08:01:49 crc kubenswrapper[4929]: I1122 08:01:49.009150 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-w5fsw"] Nov 22 08:01:49 crc kubenswrapper[4929]: I1122 08:01:49.019737 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-w5fsw"] Nov 22 08:01:49 crc kubenswrapper[4929]: I1122 08:01:49.959687 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63a57dbe-0ede-40a4-bcdf-b59556f2d59e" path="/var/lib/kubelet/pods/63a57dbe-0ede-40a4-bcdf-b59556f2d59e/volumes" Nov 22 08:01:50 crc kubenswrapper[4929]: I1122 08:01:50.008878 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-kszbt" podStartSLOduration=3.056271296 podStartE2EDuration="18.00885616s" podCreationTimestamp="2025-11-22 08:01:32 +0000 UTC" firstStartedPulling="2025-11-22 08:01:33.6938325 +0000 UTC m=+3030.803286513" lastFinishedPulling="2025-11-22 08:01:48.646417364 +0000 UTC m=+3045.755871377" observedRunningTime="2025-11-22 08:01:50.00404586 +0000 UTC m=+3047.113499873" watchObservedRunningTime="2025-11-22 08:01:50.00885616 +0000 UTC m=+3047.118310173" Nov 22 08:01:52 crc kubenswrapper[4929]: I1122 08:01:52.373801 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-kszbt" Nov 22 08:01:52 crc kubenswrapper[4929]: I1122 08:01:52.374979 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-kszbt" Nov 22 08:01:53 crc kubenswrapper[4929]: I1122 08:01:53.425536 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-kszbt" podUID="92167854-bed9-49d3-a046-1585524bd8ee" containerName="registry-server" probeResult="failure" output=< Nov 22 08:01:53 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 08:01:53 crc kubenswrapper[4929]: > Nov 22 08:02:02 crc kubenswrapper[4929]: I1122 08:02:02.426572 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-kszbt" Nov 22 08:02:02 crc kubenswrapper[4929]: I1122 08:02:02.481124 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-kszbt" Nov 22 08:02:03 crc kubenswrapper[4929]: I1122 08:02:03.249337 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kszbt"] Nov 22 08:02:04 crc kubenswrapper[4929]: I1122 08:02:04.131478 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-kszbt" podUID="92167854-bed9-49d3-a046-1585524bd8ee" containerName="registry-server" 
containerID="cri-o://b6252cb126e904ae5c9698ac05fab012cd63dba0c33849d562d1388431513531" gracePeriod=2 Nov 22 08:02:04 crc kubenswrapper[4929]: I1122 08:02:04.642193 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kszbt" Nov 22 08:02:04 crc kubenswrapper[4929]: I1122 08:02:04.688881 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92167854-bed9-49d3-a046-1585524bd8ee-catalog-content\") pod \"92167854-bed9-49d3-a046-1585524bd8ee\" (UID: \"92167854-bed9-49d3-a046-1585524bd8ee\") " Nov 22 08:02:04 crc kubenswrapper[4929]: I1122 08:02:04.688963 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92167854-bed9-49d3-a046-1585524bd8ee-utilities\") pod \"92167854-bed9-49d3-a046-1585524bd8ee\" (UID: \"92167854-bed9-49d3-a046-1585524bd8ee\") " Nov 22 08:02:04 crc kubenswrapper[4929]: I1122 08:02:04.689225 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5p8td\" (UniqueName: \"kubernetes.io/projected/92167854-bed9-49d3-a046-1585524bd8ee-kube-api-access-5p8td\") pod \"92167854-bed9-49d3-a046-1585524bd8ee\" (UID: \"92167854-bed9-49d3-a046-1585524bd8ee\") " Nov 22 08:02:04 crc kubenswrapper[4929]: I1122 08:02:04.690870 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92167854-bed9-49d3-a046-1585524bd8ee-utilities" (OuterVolumeSpecName: "utilities") pod "92167854-bed9-49d3-a046-1585524bd8ee" (UID: "92167854-bed9-49d3-a046-1585524bd8ee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:02:04 crc kubenswrapper[4929]: I1122 08:02:04.694355 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92167854-bed9-49d3-a046-1585524bd8ee-kube-api-access-5p8td" (OuterVolumeSpecName: "kube-api-access-5p8td") pod "92167854-bed9-49d3-a046-1585524bd8ee" (UID: "92167854-bed9-49d3-a046-1585524bd8ee"). InnerVolumeSpecName "kube-api-access-5p8td". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 08:02:04 crc kubenswrapper[4929]: I1122 08:02:04.785414 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92167854-bed9-49d3-a046-1585524bd8ee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "92167854-bed9-49d3-a046-1585524bd8ee" (UID: "92167854-bed9-49d3-a046-1585524bd8ee"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:02:04 crc kubenswrapper[4929]: I1122 08:02:04.791496 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5p8td\" (UniqueName: \"kubernetes.io/projected/92167854-bed9-49d3-a046-1585524bd8ee-kube-api-access-5p8td\") on node \"crc\" DevicePath \"\"" Nov 22 08:02:04 crc kubenswrapper[4929]: I1122 08:02:04.791535 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/92167854-bed9-49d3-a046-1585524bd8ee-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 08:02:04 crc kubenswrapper[4929]: I1122 08:02:04.791547 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/92167854-bed9-49d3-a046-1585524bd8ee-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 08:02:05 crc kubenswrapper[4929]: I1122 08:02:05.154418 4929 generic.go:334] "Generic (PLEG): container finished" podID="92167854-bed9-49d3-a046-1585524bd8ee" containerID="b6252cb126e904ae5c9698ac05fab012cd63dba0c33849d562d1388431513531" exitCode=0 Nov 22 08:02:05 crc kubenswrapper[4929]: I1122 08:02:05.154494 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kszbt" event={"ID":"92167854-bed9-49d3-a046-1585524bd8ee","Type":"ContainerDied","Data":"b6252cb126e904ae5c9698ac05fab012cd63dba0c33849d562d1388431513531"} Nov 22 08:02:05 crc kubenswrapper[4929]: I1122 08:02:05.154543 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kszbt" event={"ID":"92167854-bed9-49d3-a046-1585524bd8ee","Type":"ContainerDied","Data":"6d5cea2423c22555711307a30d3044e4d063332a0585acf39a7676a3b206590e"} Nov 22 08:02:05 crc kubenswrapper[4929]: I1122 08:02:05.154581 4929 scope.go:117] "RemoveContainer" containerID="b6252cb126e904ae5c9698ac05fab012cd63dba0c33849d562d1388431513531" Nov 22 08:02:05 crc kubenswrapper[4929]: I1122 08:02:05.154973 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kszbt" Nov 22 08:02:05 crc kubenswrapper[4929]: I1122 08:02:05.208172 4929 scope.go:117] "RemoveContainer" containerID="aa281ab93070281fa3121544b4255c9c5047898351b01a5f317ec2b7f0c60b55" Nov 22 08:02:05 crc kubenswrapper[4929]: I1122 08:02:05.213361 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kszbt"] Nov 22 08:02:05 crc kubenswrapper[4929]: I1122 08:02:05.220353 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-kszbt"] Nov 22 08:02:05 crc kubenswrapper[4929]: I1122 08:02:05.240548 4929 scope.go:117] "RemoveContainer" containerID="c5eefeb0f553d99feb35d33ccfcf499bf1925f84d13dd1eec21585aa3402ae97" Nov 22 08:02:05 crc kubenswrapper[4929]: I1122 08:02:05.309580 4929 scope.go:117] "RemoveContainer" containerID="b6252cb126e904ae5c9698ac05fab012cd63dba0c33849d562d1388431513531" Nov 22 08:02:05 crc kubenswrapper[4929]: E1122 08:02:05.310260 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6252cb126e904ae5c9698ac05fab012cd63dba0c33849d562d1388431513531\": container with ID starting with b6252cb126e904ae5c9698ac05fab012cd63dba0c33849d562d1388431513531 not found: ID does not exist" containerID="b6252cb126e904ae5c9698ac05fab012cd63dba0c33849d562d1388431513531" Nov 22 08:02:05 crc kubenswrapper[4929]: I1122 08:02:05.310316 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6252cb126e904ae5c9698ac05fab012cd63dba0c33849d562d1388431513531"} err="failed to get container status \"b6252cb126e904ae5c9698ac05fab012cd63dba0c33849d562d1388431513531\": rpc error: code = NotFound desc = could not find container \"b6252cb126e904ae5c9698ac05fab012cd63dba0c33849d562d1388431513531\": container with ID starting with b6252cb126e904ae5c9698ac05fab012cd63dba0c33849d562d1388431513531 not found: ID does not exist" Nov 22 08:02:05 crc kubenswrapper[4929]: I1122 08:02:05.310349 4929 scope.go:117] "RemoveContainer" containerID="aa281ab93070281fa3121544b4255c9c5047898351b01a5f317ec2b7f0c60b55" Nov 22 08:02:05 crc kubenswrapper[4929]: E1122 08:02:05.310842 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa281ab93070281fa3121544b4255c9c5047898351b01a5f317ec2b7f0c60b55\": container with ID starting with aa281ab93070281fa3121544b4255c9c5047898351b01a5f317ec2b7f0c60b55 not found: ID does not exist" containerID="aa281ab93070281fa3121544b4255c9c5047898351b01a5f317ec2b7f0c60b55" Nov 22 08:02:05 crc kubenswrapper[4929]: I1122 08:02:05.310885 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa281ab93070281fa3121544b4255c9c5047898351b01a5f317ec2b7f0c60b55"} err="failed to get container status \"aa281ab93070281fa3121544b4255c9c5047898351b01a5f317ec2b7f0c60b55\": rpc error: code = NotFound desc = could not find container \"aa281ab93070281fa3121544b4255c9c5047898351b01a5f317ec2b7f0c60b55\": container with ID starting with aa281ab93070281fa3121544b4255c9c5047898351b01a5f317ec2b7f0c60b55 not found: ID does not exist" Nov 22 08:02:05 crc kubenswrapper[4929]: I1122 08:02:05.310913 4929 scope.go:117] "RemoveContainer" containerID="c5eefeb0f553d99feb35d33ccfcf499bf1925f84d13dd1eec21585aa3402ae97" Nov 22 08:02:05 crc kubenswrapper[4929]: E1122 08:02:05.311400 4929 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"c5eefeb0f553d99feb35d33ccfcf499bf1925f84d13dd1eec21585aa3402ae97\": container with ID starting with c5eefeb0f553d99feb35d33ccfcf499bf1925f84d13dd1eec21585aa3402ae97 not found: ID does not exist" containerID="c5eefeb0f553d99feb35d33ccfcf499bf1925f84d13dd1eec21585aa3402ae97" Nov 22 08:02:05 crc kubenswrapper[4929]: I1122 08:02:05.311430 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5eefeb0f553d99feb35d33ccfcf499bf1925f84d13dd1eec21585aa3402ae97"} err="failed to get container status \"c5eefeb0f553d99feb35d33ccfcf499bf1925f84d13dd1eec21585aa3402ae97\": rpc error: code = NotFound desc = could not find container \"c5eefeb0f553d99feb35d33ccfcf499bf1925f84d13dd1eec21585aa3402ae97\": container with ID starting with c5eefeb0f553d99feb35d33ccfcf499bf1925f84d13dd1eec21585aa3402ae97 not found: ID does not exist" Nov 22 08:02:05 crc kubenswrapper[4929]: I1122 08:02:05.962471 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92167854-bed9-49d3-a046-1585524bd8ee" path="/var/lib/kubelet/pods/92167854-bed9-49d3-a046-1585524bd8ee/volumes" Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.199389 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-72hgf"] Nov 22 08:02:11 crc kubenswrapper[4929]: E1122 08:02:11.200324 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63a57dbe-0ede-40a4-bcdf-b59556f2d59e" containerName="extract-utilities" Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.200338 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="63a57dbe-0ede-40a4-bcdf-b59556f2d59e" containerName="extract-utilities" Nov 22 08:02:11 crc kubenswrapper[4929]: E1122 08:02:11.200360 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92167854-bed9-49d3-a046-1585524bd8ee" containerName="extract-content" Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.200365 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="92167854-bed9-49d3-a046-1585524bd8ee" containerName="extract-content" Nov 22 08:02:11 crc kubenswrapper[4929]: E1122 08:02:11.200378 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92167854-bed9-49d3-a046-1585524bd8ee" containerName="registry-server" Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.200394 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="92167854-bed9-49d3-a046-1585524bd8ee" containerName="registry-server" Nov 22 08:02:11 crc kubenswrapper[4929]: E1122 08:02:11.200413 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63a57dbe-0ede-40a4-bcdf-b59556f2d59e" containerName="registry-server" Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.200419 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="63a57dbe-0ede-40a4-bcdf-b59556f2d59e" containerName="registry-server" Nov 22 08:02:11 crc kubenswrapper[4929]: E1122 08:02:11.200434 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92167854-bed9-49d3-a046-1585524bd8ee" containerName="extract-utilities" Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.200440 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="92167854-bed9-49d3-a046-1585524bd8ee" containerName="extract-utilities" Nov 22 08:02:11 crc kubenswrapper[4929]: E1122 08:02:11.200451 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63a57dbe-0ede-40a4-bcdf-b59556f2d59e" containerName="extract-content" Nov 
22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.200456 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="63a57dbe-0ede-40a4-bcdf-b59556f2d59e" containerName="extract-content" Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.200659 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="63a57dbe-0ede-40a4-bcdf-b59556f2d59e" containerName="registry-server" Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.200672 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="92167854-bed9-49d3-a046-1585524bd8ee" containerName="registry-server" Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.203136 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-72hgf" Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.230760 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-72hgf"] Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.329243 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8cf24693-7ad6-44c7-970c-99f337785e32-utilities\") pod \"redhat-marketplace-72hgf\" (UID: \"8cf24693-7ad6-44c7-970c-99f337785e32\") " pod="openshift-marketplace/redhat-marketplace-72hgf" Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.329381 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8cf24693-7ad6-44c7-970c-99f337785e32-catalog-content\") pod \"redhat-marketplace-72hgf\" (UID: \"8cf24693-7ad6-44c7-970c-99f337785e32\") " pod="openshift-marketplace/redhat-marketplace-72hgf" Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.329427 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54f42\" (UniqueName: \"kubernetes.io/projected/8cf24693-7ad6-44c7-970c-99f337785e32-kube-api-access-54f42\") pod \"redhat-marketplace-72hgf\" (UID: \"8cf24693-7ad6-44c7-970c-99f337785e32\") " pod="openshift-marketplace/redhat-marketplace-72hgf" Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.431134 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8cf24693-7ad6-44c7-970c-99f337785e32-utilities\") pod \"redhat-marketplace-72hgf\" (UID: \"8cf24693-7ad6-44c7-970c-99f337785e32\") " pod="openshift-marketplace/redhat-marketplace-72hgf" Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.431410 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8cf24693-7ad6-44c7-970c-99f337785e32-catalog-content\") pod \"redhat-marketplace-72hgf\" (UID: \"8cf24693-7ad6-44c7-970c-99f337785e32\") " pod="openshift-marketplace/redhat-marketplace-72hgf" Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.431481 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54f42\" (UniqueName: \"kubernetes.io/projected/8cf24693-7ad6-44c7-970c-99f337785e32-kube-api-access-54f42\") pod \"redhat-marketplace-72hgf\" (UID: \"8cf24693-7ad6-44c7-970c-99f337785e32\") " pod="openshift-marketplace/redhat-marketplace-72hgf" Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.431654 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/8cf24693-7ad6-44c7-970c-99f337785e32-utilities\") pod \"redhat-marketplace-72hgf\" (UID: \"8cf24693-7ad6-44c7-970c-99f337785e32\") " pod="openshift-marketplace/redhat-marketplace-72hgf" Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.432017 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8cf24693-7ad6-44c7-970c-99f337785e32-catalog-content\") pod \"redhat-marketplace-72hgf\" (UID: \"8cf24693-7ad6-44c7-970c-99f337785e32\") " pod="openshift-marketplace/redhat-marketplace-72hgf" Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.458251 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54f42\" (UniqueName: \"kubernetes.io/projected/8cf24693-7ad6-44c7-970c-99f337785e32-kube-api-access-54f42\") pod \"redhat-marketplace-72hgf\" (UID: \"8cf24693-7ad6-44c7-970c-99f337785e32\") " pod="openshift-marketplace/redhat-marketplace-72hgf" Nov 22 08:02:11 crc kubenswrapper[4929]: I1122 08:02:11.579892 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-72hgf" Nov 22 08:02:12 crc kubenswrapper[4929]: I1122 08:02:12.079790 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-72hgf"] Nov 22 08:02:12 crc kubenswrapper[4929]: I1122 08:02:12.230779 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-72hgf" event={"ID":"8cf24693-7ad6-44c7-970c-99f337785e32","Type":"ContainerStarted","Data":"27e2394a5e36f59e07e2fd983bb467a1196d45eae7dac49e0fe5d42c4dd5fcc5"} Nov 22 08:02:13 crc kubenswrapper[4929]: I1122 08:02:13.240283 4929 generic.go:334] "Generic (PLEG): container finished" podID="8cf24693-7ad6-44c7-970c-99f337785e32" containerID="74355b55239ec31e9f4dcd8f2e315ef3c351781ec57ff0921f67eb32c67cd4b2" exitCode=0 Nov 22 08:02:13 crc kubenswrapper[4929]: I1122 08:02:13.240340 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-72hgf" event={"ID":"8cf24693-7ad6-44c7-970c-99f337785e32","Type":"ContainerDied","Data":"74355b55239ec31e9f4dcd8f2e315ef3c351781ec57ff0921f67eb32c67cd4b2"} Nov 22 08:02:16 crc kubenswrapper[4929]: I1122 08:02:16.281136 4929 generic.go:334] "Generic (PLEG): container finished" podID="8cf24693-7ad6-44c7-970c-99f337785e32" containerID="dd522a2c46c968ea9b5bdfc9cfaf552b51b481b5670bcd1d4f723e4392571187" exitCode=0 Nov 22 08:02:16 crc kubenswrapper[4929]: I1122 08:02:16.281324 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-72hgf" event={"ID":"8cf24693-7ad6-44c7-970c-99f337785e32","Type":"ContainerDied","Data":"dd522a2c46c968ea9b5bdfc9cfaf552b51b481b5670bcd1d4f723e4392571187"} Nov 22 08:02:18 crc kubenswrapper[4929]: I1122 08:02:18.306751 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-72hgf" event={"ID":"8cf24693-7ad6-44c7-970c-99f337785e32","Type":"ContainerStarted","Data":"8617be69d5648925f22c0d9bf737419b471e64aa0e5ffe6356cf4417fb280505"} Nov 22 08:02:19 crc kubenswrapper[4929]: I1122 08:02:19.345036 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-72hgf" podStartSLOduration=3.825230088 podStartE2EDuration="8.345017356s" podCreationTimestamp="2025-11-22 08:02:11 +0000 UTC" firstStartedPulling="2025-11-22 08:02:13.241893402 +0000 UTC m=+3070.351347415" 
lastFinishedPulling="2025-11-22 08:02:17.76168063 +0000 UTC m=+3074.871134683" observedRunningTime="2025-11-22 08:02:19.337647403 +0000 UTC m=+3076.447101416" watchObservedRunningTime="2025-11-22 08:02:19.345017356 +0000 UTC m=+3076.454471369" Nov 22 08:02:21 crc kubenswrapper[4929]: I1122 08:02:21.580051 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-72hgf" Nov 22 08:02:21 crc kubenswrapper[4929]: I1122 08:02:21.580466 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-72hgf" Nov 22 08:02:21 crc kubenswrapper[4929]: I1122 08:02:21.659047 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-72hgf" Nov 22 08:02:31 crc kubenswrapper[4929]: I1122 08:02:31.643260 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-72hgf" Nov 22 08:02:31 crc kubenswrapper[4929]: I1122 08:02:31.699902 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-72hgf"] Nov 22 08:02:32 crc kubenswrapper[4929]: I1122 08:02:32.511916 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-72hgf" podUID="8cf24693-7ad6-44c7-970c-99f337785e32" containerName="registry-server" containerID="cri-o://8617be69d5648925f22c0d9bf737419b471e64aa0e5ffe6356cf4417fb280505" gracePeriod=2 Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.052201 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-s4kv5"] Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.063652 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-s4kv5"] Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.097682 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-72hgf" Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.212889 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54f42\" (UniqueName: \"kubernetes.io/projected/8cf24693-7ad6-44c7-970c-99f337785e32-kube-api-access-54f42\") pod \"8cf24693-7ad6-44c7-970c-99f337785e32\" (UID: \"8cf24693-7ad6-44c7-970c-99f337785e32\") " Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.213184 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8cf24693-7ad6-44c7-970c-99f337785e32-utilities\") pod \"8cf24693-7ad6-44c7-970c-99f337785e32\" (UID: \"8cf24693-7ad6-44c7-970c-99f337785e32\") " Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.213230 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8cf24693-7ad6-44c7-970c-99f337785e32-catalog-content\") pod \"8cf24693-7ad6-44c7-970c-99f337785e32\" (UID: \"8cf24693-7ad6-44c7-970c-99f337785e32\") " Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.213858 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8cf24693-7ad6-44c7-970c-99f337785e32-utilities" (OuterVolumeSpecName: "utilities") pod "8cf24693-7ad6-44c7-970c-99f337785e32" (UID: "8cf24693-7ad6-44c7-970c-99f337785e32"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.218058 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cf24693-7ad6-44c7-970c-99f337785e32-kube-api-access-54f42" (OuterVolumeSpecName: "kube-api-access-54f42") pod "8cf24693-7ad6-44c7-970c-99f337785e32" (UID: "8cf24693-7ad6-44c7-970c-99f337785e32"). InnerVolumeSpecName "kube-api-access-54f42". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.241361 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8cf24693-7ad6-44c7-970c-99f337785e32-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8cf24693-7ad6-44c7-970c-99f337785e32" (UID: "8cf24693-7ad6-44c7-970c-99f337785e32"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.316487 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8cf24693-7ad6-44c7-970c-99f337785e32-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.316546 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8cf24693-7ad6-44c7-970c-99f337785e32-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.316564 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54f42\" (UniqueName: \"kubernetes.io/projected/8cf24693-7ad6-44c7-970c-99f337785e32-kube-api-access-54f42\") on node \"crc\" DevicePath \"\"" Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.527021 4929 generic.go:334] "Generic (PLEG): container finished" podID="8cf24693-7ad6-44c7-970c-99f337785e32" containerID="8617be69d5648925f22c0d9bf737419b471e64aa0e5ffe6356cf4417fb280505" exitCode=0 Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.527097 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-72hgf" event={"ID":"8cf24693-7ad6-44c7-970c-99f337785e32","Type":"ContainerDied","Data":"8617be69d5648925f22c0d9bf737419b471e64aa0e5ffe6356cf4417fb280505"} Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.527112 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-72hgf" Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.527199 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-72hgf" event={"ID":"8cf24693-7ad6-44c7-970c-99f337785e32","Type":"ContainerDied","Data":"27e2394a5e36f59e07e2fd983bb467a1196d45eae7dac49e0fe5d42c4dd5fcc5"} Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.527282 4929 scope.go:117] "RemoveContainer" containerID="8617be69d5648925f22c0d9bf737419b471e64aa0e5ffe6356cf4417fb280505" Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.559513 4929 scope.go:117] "RemoveContainer" containerID="dd522a2c46c968ea9b5bdfc9cfaf552b51b481b5670bcd1d4f723e4392571187" Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.579205 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-72hgf"] Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.587566 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-72hgf"] Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.603861 4929 scope.go:117] "RemoveContainer" containerID="74355b55239ec31e9f4dcd8f2e315ef3c351781ec57ff0921f67eb32c67cd4b2" Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.636391 4929 scope.go:117] "RemoveContainer" containerID="8617be69d5648925f22c0d9bf737419b471e64aa0e5ffe6356cf4417fb280505" Nov 22 08:02:33 crc kubenswrapper[4929]: E1122 08:02:33.636884 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8617be69d5648925f22c0d9bf737419b471e64aa0e5ffe6356cf4417fb280505\": container with ID starting with 8617be69d5648925f22c0d9bf737419b471e64aa0e5ffe6356cf4417fb280505 not found: ID does not exist" containerID="8617be69d5648925f22c0d9bf737419b471e64aa0e5ffe6356cf4417fb280505" Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.636933 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8617be69d5648925f22c0d9bf737419b471e64aa0e5ffe6356cf4417fb280505"} err="failed to get container status \"8617be69d5648925f22c0d9bf737419b471e64aa0e5ffe6356cf4417fb280505\": rpc error: code = NotFound desc = could not find container \"8617be69d5648925f22c0d9bf737419b471e64aa0e5ffe6356cf4417fb280505\": container with ID starting with 8617be69d5648925f22c0d9bf737419b471e64aa0e5ffe6356cf4417fb280505 not found: ID does not exist" Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.636969 4929 scope.go:117] "RemoveContainer" containerID="dd522a2c46c968ea9b5bdfc9cfaf552b51b481b5670bcd1d4f723e4392571187" Nov 22 08:02:33 crc kubenswrapper[4929]: E1122 08:02:33.637453 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd522a2c46c968ea9b5bdfc9cfaf552b51b481b5670bcd1d4f723e4392571187\": container with ID starting with dd522a2c46c968ea9b5bdfc9cfaf552b51b481b5670bcd1d4f723e4392571187 not found: ID does not exist" containerID="dd522a2c46c968ea9b5bdfc9cfaf552b51b481b5670bcd1d4f723e4392571187" Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.637540 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd522a2c46c968ea9b5bdfc9cfaf552b51b481b5670bcd1d4f723e4392571187"} err="failed to get container status \"dd522a2c46c968ea9b5bdfc9cfaf552b51b481b5670bcd1d4f723e4392571187\": rpc error: code = NotFound desc = could not find 
container \"dd522a2c46c968ea9b5bdfc9cfaf552b51b481b5670bcd1d4f723e4392571187\": container with ID starting with dd522a2c46c968ea9b5bdfc9cfaf552b51b481b5670bcd1d4f723e4392571187 not found: ID does not exist" Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.637610 4929 scope.go:117] "RemoveContainer" containerID="74355b55239ec31e9f4dcd8f2e315ef3c351781ec57ff0921f67eb32c67cd4b2" Nov 22 08:02:33 crc kubenswrapper[4929]: E1122 08:02:33.637964 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74355b55239ec31e9f4dcd8f2e315ef3c351781ec57ff0921f67eb32c67cd4b2\": container with ID starting with 74355b55239ec31e9f4dcd8f2e315ef3c351781ec57ff0921f67eb32c67cd4b2 not found: ID does not exist" containerID="74355b55239ec31e9f4dcd8f2e315ef3c351781ec57ff0921f67eb32c67cd4b2" Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.637992 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74355b55239ec31e9f4dcd8f2e315ef3c351781ec57ff0921f67eb32c67cd4b2"} err="failed to get container status \"74355b55239ec31e9f4dcd8f2e315ef3c351781ec57ff0921f67eb32c67cd4b2\": rpc error: code = NotFound desc = could not find container \"74355b55239ec31e9f4dcd8f2e315ef3c351781ec57ff0921f67eb32c67cd4b2\": container with ID starting with 74355b55239ec31e9f4dcd8f2e315ef3c351781ec57ff0921f67eb32c67cd4b2 not found: ID does not exist" Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.960983 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cf24693-7ad6-44c7-970c-99f337785e32" path="/var/lib/kubelet/pods/8cf24693-7ad6-44c7-970c-99f337785e32/volumes" Nov 22 08:02:33 crc kubenswrapper[4929]: I1122 08:02:33.962502 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3c0d097-15a2-494f-a1cc-2bde685efa87" path="/var/lib/kubelet/pods/f3c0d097-15a2-494f-a1cc-2bde685efa87/volumes" Nov 22 08:02:48 crc kubenswrapper[4929]: I1122 08:02:48.594647 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 08:02:48 crc kubenswrapper[4929]: I1122 08:02:48.595524 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 08:02:51 crc kubenswrapper[4929]: I1122 08:02:51.057927 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-tqf65"] Nov 22 08:02:51 crc kubenswrapper[4929]: I1122 08:02:51.065542 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-tqf65"] Nov 22 08:02:51 crc kubenswrapper[4929]: I1122 08:02:51.957553 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ebe2ed5d-6475-409a-a3a9-9a47d3de685b" path="/var/lib/kubelet/pods/ebe2ed5d-6475-409a-a3a9-9a47d3de685b/volumes" Nov 22 08:02:58 crc kubenswrapper[4929]: I1122 08:02:58.028560 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-bs8nq"] Nov 22 08:02:58 crc kubenswrapper[4929]: I1122 08:02:58.038446 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/nova-cell0-cell-mapping-bs8nq"] Nov 22 08:02:59 crc kubenswrapper[4929]: I1122 08:02:59.968801 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d2e4e61-cf07-4acf-9ecf-e460a90b13aa" path="/var/lib/kubelet/pods/6d2e4e61-cf07-4acf-9ecf-e460a90b13aa/volumes" Nov 22 08:03:13 crc kubenswrapper[4929]: I1122 08:03:13.103349 4929 scope.go:117] "RemoveContainer" containerID="f740af74253fae41d9c7439cd2325c66c7cca6c486a28435a60a8232a2090e73" Nov 22 08:03:13 crc kubenswrapper[4929]: I1122 08:03:13.163155 4929 scope.go:117] "RemoveContainer" containerID="e1a366137201b6689cd0e75aadbbf83f57a48e308149db83354111c219e52e1d" Nov 22 08:03:13 crc kubenswrapper[4929]: I1122 08:03:13.221386 4929 scope.go:117] "RemoveContainer" containerID="4a0a38ab0e02d62955d90b79852d2db15e898b47138169421d7c158ef27aa7be" Nov 22 08:03:18 crc kubenswrapper[4929]: I1122 08:03:18.595573 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 08:03:18 crc kubenswrapper[4929]: I1122 08:03:18.596274 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 08:03:48 crc kubenswrapper[4929]: I1122 08:03:48.595014 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 08:03:48 crc kubenswrapper[4929]: I1122 08:03:48.595561 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 08:03:48 crc kubenswrapper[4929]: I1122 08:03:48.595598 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 08:03:48 crc kubenswrapper[4929]: I1122 08:03:48.596372 4929 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531"} pod="openshift-machine-config-operator/machine-config-daemon-dssfx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 08:03:48 crc kubenswrapper[4929]: I1122 08:03:48.596427 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" containerID="cri-o://df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" gracePeriod=600 Nov 22 08:03:48 crc kubenswrapper[4929]: E1122 08:03:48.737254 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:03:49 crc kubenswrapper[4929]: I1122 08:03:49.315319 4929 generic.go:334] "Generic (PLEG): container finished" podID="470531cb-120c-48d9-80e1-adf074cf3055" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" exitCode=0 Nov 22 08:03:49 crc kubenswrapper[4929]: I1122 08:03:49.315363 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerDied","Data":"df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531"} Nov 22 08:03:49 crc kubenswrapper[4929]: I1122 08:03:49.315752 4929 scope.go:117] "RemoveContainer" containerID="1fd8dd54949c05dad9f35e5eb488d84888297acdf4612e470c54032ca755c997" Nov 22 08:03:49 crc kubenswrapper[4929]: I1122 08:03:49.316591 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:03:49 crc kubenswrapper[4929]: E1122 08:03:49.317026 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:04:01 crc kubenswrapper[4929]: I1122 08:04:01.948440 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:04:01 crc kubenswrapper[4929]: E1122 08:04:01.949629 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:04:13 crc kubenswrapper[4929]: I1122 08:04:13.960694 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:04:13 crc kubenswrapper[4929]: E1122 08:04:13.961538 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:04:24 crc kubenswrapper[4929]: I1122 08:04:24.947873 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:04:24 crc kubenswrapper[4929]: E1122 08:04:24.949269 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:04:37 crc kubenswrapper[4929]: I1122 08:04:37.948482 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:04:37 crc kubenswrapper[4929]: E1122 08:04:37.950874 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:04:52 crc kubenswrapper[4929]: I1122 08:04:52.947298 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:04:52 crc kubenswrapper[4929]: E1122 08:04:52.948096 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:05:06 crc kubenswrapper[4929]: I1122 08:05:06.947893 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:05:06 crc kubenswrapper[4929]: E1122 08:05:06.948838 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:05:08 crc kubenswrapper[4929]: I1122 08:05:08.043709 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-tf9kg"] Nov 22 08:05:08 crc kubenswrapper[4929]: I1122 08:05:08.051989 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-tf9kg"] Nov 22 08:05:09 crc kubenswrapper[4929]: I1122 08:05:09.961068 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="276eb7e8-f411-4d79-8ca1-0a7d55fa83ff" path="/var/lib/kubelet/pods/276eb7e8-f411-4d79-8ca1-0a7d55fa83ff/volumes" Nov 22 08:05:13 crc kubenswrapper[4929]: I1122 08:05:13.374858 4929 scope.go:117] "RemoveContainer" containerID="3f06f4053b60d4e141beeb4898f5c10ca17684c7370470eab5853c918287e253" Nov 22 08:05:17 crc kubenswrapper[4929]: I1122 08:05:17.949469 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:05:17 crc kubenswrapper[4929]: E1122 08:05:17.950476 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:05:28 crc kubenswrapper[4929]: I1122 08:05:28.054767 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-x8qjf"] Nov 22 08:05:28 crc kubenswrapper[4929]: I1122 08:05:28.065629 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-x8qjf"] Nov 22 08:05:29 crc kubenswrapper[4929]: I1122 08:05:29.958160 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="861208ae-0e23-4cab-9ac4-b08cade2fead" path="/var/lib/kubelet/pods/861208ae-0e23-4cab-9ac4-b08cade2fead/volumes" Nov 22 08:05:30 crc kubenswrapper[4929]: I1122 08:05:30.947776 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:05:30 crc kubenswrapper[4929]: E1122 08:05:30.948020 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:05:42 crc kubenswrapper[4929]: I1122 08:05:42.947900 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:05:42 crc kubenswrapper[4929]: E1122 08:05:42.948616 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:05:56 crc kubenswrapper[4929]: I1122 08:05:56.948177 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:05:56 crc kubenswrapper[4929]: E1122 08:05:56.949386 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:06:09 crc kubenswrapper[4929]: I1122 08:06:09.948141 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:06:09 crc kubenswrapper[4929]: E1122 08:06:09.948945 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:06:13 crc kubenswrapper[4929]: I1122 08:06:13.477934 
4929 scope.go:117] "RemoveContainer" containerID="5d004db3ce6a052db7901def25365e428f0ce7cabacf8a57a1b2750cbc41bb80" Nov 22 08:06:13 crc kubenswrapper[4929]: I1122 08:06:13.522559 4929 scope.go:117] "RemoveContainer" containerID="1219ce4aabfc847355138a1b41038f8de39c771d9c86a03ec929a438847c94a7" Nov 22 08:06:13 crc kubenswrapper[4929]: I1122 08:06:13.545813 4929 scope.go:117] "RemoveContainer" containerID="84f9f1c35f10a2958f5931adecf0660deba76d33196e95ad9af570df2bf612f3" Nov 22 08:06:13 crc kubenswrapper[4929]: I1122 08:06:13.563598 4929 scope.go:117] "RemoveContainer" containerID="ca89836a8af43866752dbd81f549c168211949ed4aed22a0fc856144ec7e9197" Nov 22 08:06:22 crc kubenswrapper[4929]: I1122 08:06:22.947984 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:06:22 crc kubenswrapper[4929]: E1122 08:06:22.950468 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:06:36 crc kubenswrapper[4929]: I1122 08:06:36.947960 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:06:36 crc kubenswrapper[4929]: E1122 08:06:36.948843 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:06:48 crc kubenswrapper[4929]: I1122 08:06:48.947469 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:06:48 crc kubenswrapper[4929]: E1122 08:06:48.948799 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:07:01 crc kubenswrapper[4929]: I1122 08:07:01.947816 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:07:01 crc kubenswrapper[4929]: E1122 08:07:01.948750 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:07:12 crc kubenswrapper[4929]: I1122 08:07:12.948058 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:07:12 crc 
kubenswrapper[4929]: E1122 08:07:12.948884 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:07:27 crc kubenswrapper[4929]: I1122 08:07:27.953606 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:07:27 crc kubenswrapper[4929]: E1122 08:07:27.954601 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:07:40 crc kubenswrapper[4929]: I1122 08:07:40.947295 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:07:40 crc kubenswrapper[4929]: E1122 08:07:40.947994 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:07:52 crc kubenswrapper[4929]: I1122 08:07:52.948302 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:07:52 crc kubenswrapper[4929]: E1122 08:07:52.949022 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:08:07 crc kubenswrapper[4929]: I1122 08:08:07.948413 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:08:07 crc kubenswrapper[4929]: E1122 08:08:07.949198 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:08:19 crc kubenswrapper[4929]: I1122 08:08:19.947828 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:08:19 crc kubenswrapper[4929]: E1122 08:08:19.948993 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:08:31 crc kubenswrapper[4929]: I1122 08:08:31.947315 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:08:31 crc kubenswrapper[4929]: E1122 08:08:31.948102 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:08:42 crc kubenswrapper[4929]: I1122 08:08:42.955165 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:08:42 crc kubenswrapper[4929]: E1122 08:08:42.956012 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:08:57 crc kubenswrapper[4929]: I1122 08:08:57.948064 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:08:58 crc kubenswrapper[4929]: I1122 08:08:58.458149 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"ef7c385702fe51dea117fafc786255623c312f2fcfd89279145635a875374763"} Nov 22 08:11:18 crc kubenswrapper[4929]: I1122 08:11:18.595003 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 08:11:18 crc kubenswrapper[4929]: I1122 08:11:18.595771 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 08:11:30 crc kubenswrapper[4929]: I1122 08:11:30.350965 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9b4m8"] Nov 22 08:11:30 crc kubenswrapper[4929]: E1122 08:11:30.351868 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cf24693-7ad6-44c7-970c-99f337785e32" containerName="registry-server" Nov 22 08:11:30 crc kubenswrapper[4929]: I1122 08:11:30.351885 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cf24693-7ad6-44c7-970c-99f337785e32" containerName="registry-server" Nov 22 08:11:30 crc kubenswrapper[4929]: E1122 08:11:30.351910 4929 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="8cf24693-7ad6-44c7-970c-99f337785e32" containerName="extract-content" Nov 22 08:11:30 crc kubenswrapper[4929]: I1122 08:11:30.351920 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cf24693-7ad6-44c7-970c-99f337785e32" containerName="extract-content" Nov 22 08:11:30 crc kubenswrapper[4929]: E1122 08:11:30.351938 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cf24693-7ad6-44c7-970c-99f337785e32" containerName="extract-utilities" Nov 22 08:11:30 crc kubenswrapper[4929]: I1122 08:11:30.351947 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cf24693-7ad6-44c7-970c-99f337785e32" containerName="extract-utilities" Nov 22 08:11:30 crc kubenswrapper[4929]: I1122 08:11:30.352174 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cf24693-7ad6-44c7-970c-99f337785e32" containerName="registry-server" Nov 22 08:11:30 crc kubenswrapper[4929]: I1122 08:11:30.353970 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9b4m8" Nov 22 08:11:30 crc kubenswrapper[4929]: I1122 08:11:30.366990 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9b4m8"] Nov 22 08:11:30 crc kubenswrapper[4929]: I1122 08:11:30.390463 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmt6m\" (UniqueName: \"kubernetes.io/projected/08f1c5c3-d032-477e-8fa4-08967e353598-kube-api-access-fmt6m\") pod \"community-operators-9b4m8\" (UID: \"08f1c5c3-d032-477e-8fa4-08967e353598\") " pod="openshift-marketplace/community-operators-9b4m8" Nov 22 08:11:30 crc kubenswrapper[4929]: I1122 08:11:30.390580 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08f1c5c3-d032-477e-8fa4-08967e353598-utilities\") pod \"community-operators-9b4m8\" (UID: \"08f1c5c3-d032-477e-8fa4-08967e353598\") " pod="openshift-marketplace/community-operators-9b4m8" Nov 22 08:11:30 crc kubenswrapper[4929]: I1122 08:11:30.390699 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08f1c5c3-d032-477e-8fa4-08967e353598-catalog-content\") pod \"community-operators-9b4m8\" (UID: \"08f1c5c3-d032-477e-8fa4-08967e353598\") " pod="openshift-marketplace/community-operators-9b4m8" Nov 22 08:11:30 crc kubenswrapper[4929]: I1122 08:11:30.492907 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08f1c5c3-d032-477e-8fa4-08967e353598-utilities\") pod \"community-operators-9b4m8\" (UID: \"08f1c5c3-d032-477e-8fa4-08967e353598\") " pod="openshift-marketplace/community-operators-9b4m8" Nov 22 08:11:30 crc kubenswrapper[4929]: I1122 08:11:30.493018 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08f1c5c3-d032-477e-8fa4-08967e353598-catalog-content\") pod \"community-operators-9b4m8\" (UID: \"08f1c5c3-d032-477e-8fa4-08967e353598\") " pod="openshift-marketplace/community-operators-9b4m8" Nov 22 08:11:30 crc kubenswrapper[4929]: I1122 08:11:30.493090 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmt6m\" (UniqueName: 
\"kubernetes.io/projected/08f1c5c3-d032-477e-8fa4-08967e353598-kube-api-access-fmt6m\") pod \"community-operators-9b4m8\" (UID: \"08f1c5c3-d032-477e-8fa4-08967e353598\") " pod="openshift-marketplace/community-operators-9b4m8" Nov 22 08:11:30 crc kubenswrapper[4929]: I1122 08:11:30.493673 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08f1c5c3-d032-477e-8fa4-08967e353598-utilities\") pod \"community-operators-9b4m8\" (UID: \"08f1c5c3-d032-477e-8fa4-08967e353598\") " pod="openshift-marketplace/community-operators-9b4m8" Nov 22 08:11:30 crc kubenswrapper[4929]: I1122 08:11:30.493853 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08f1c5c3-d032-477e-8fa4-08967e353598-catalog-content\") pod \"community-operators-9b4m8\" (UID: \"08f1c5c3-d032-477e-8fa4-08967e353598\") " pod="openshift-marketplace/community-operators-9b4m8" Nov 22 08:11:30 crc kubenswrapper[4929]: I1122 08:11:30.512194 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmt6m\" (UniqueName: \"kubernetes.io/projected/08f1c5c3-d032-477e-8fa4-08967e353598-kube-api-access-fmt6m\") pod \"community-operators-9b4m8\" (UID: \"08f1c5c3-d032-477e-8fa4-08967e353598\") " pod="openshift-marketplace/community-operators-9b4m8" Nov 22 08:11:30 crc kubenswrapper[4929]: I1122 08:11:30.685189 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9b4m8" Nov 22 08:11:31 crc kubenswrapper[4929]: I1122 08:11:31.006796 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9b4m8"] Nov 22 08:11:31 crc kubenswrapper[4929]: E1122 08:11:31.687635 4929 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod08f1c5c3_d032_477e_8fa4_08967e353598.slice/crio-ceacc839d56368db6a1268ba7e2382d71a59509001b1b115f78799d2de8e2656.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod08f1c5c3_d032_477e_8fa4_08967e353598.slice/crio-conmon-ceacc839d56368db6a1268ba7e2382d71a59509001b1b115f78799d2de8e2656.scope\": RecentStats: unable to find data in memory cache]" Nov 22 08:11:31 crc kubenswrapper[4929]: I1122 08:11:31.911776 4929 generic.go:334] "Generic (PLEG): container finished" podID="08f1c5c3-d032-477e-8fa4-08967e353598" containerID="ceacc839d56368db6a1268ba7e2382d71a59509001b1b115f78799d2de8e2656" exitCode=0 Nov 22 08:11:31 crc kubenswrapper[4929]: I1122 08:11:31.911822 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9b4m8" event={"ID":"08f1c5c3-d032-477e-8fa4-08967e353598","Type":"ContainerDied","Data":"ceacc839d56368db6a1268ba7e2382d71a59509001b1b115f78799d2de8e2656"} Nov 22 08:11:31 crc kubenswrapper[4929]: I1122 08:11:31.911852 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9b4m8" event={"ID":"08f1c5c3-d032-477e-8fa4-08967e353598","Type":"ContainerStarted","Data":"345ced5290da6e90d44bfbf105cda539b5e5a4da8dbf60af0aea70adfdca70c3"} Nov 22 08:11:31 crc kubenswrapper[4929]: I1122 08:11:31.914877 4929 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 08:11:33 crc kubenswrapper[4929]: I1122 08:11:33.929334 4929 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9b4m8" event={"ID":"08f1c5c3-d032-477e-8fa4-08967e353598","Type":"ContainerStarted","Data":"49a4687c6fd48c083fcce80d08715d9b486303ba0cc41c7cbf4219e094b1fed9"} Nov 22 08:11:34 crc kubenswrapper[4929]: I1122 08:11:34.945007 4929 generic.go:334] "Generic (PLEG): container finished" podID="08f1c5c3-d032-477e-8fa4-08967e353598" containerID="49a4687c6fd48c083fcce80d08715d9b486303ba0cc41c7cbf4219e094b1fed9" exitCode=0 Nov 22 08:11:34 crc kubenswrapper[4929]: I1122 08:11:34.945117 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9b4m8" event={"ID":"08f1c5c3-d032-477e-8fa4-08967e353598","Type":"ContainerDied","Data":"49a4687c6fd48c083fcce80d08715d9b486303ba0cc41c7cbf4219e094b1fed9"} Nov 22 08:11:35 crc kubenswrapper[4929]: I1122 08:11:35.960295 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9b4m8" event={"ID":"08f1c5c3-d032-477e-8fa4-08967e353598","Type":"ContainerStarted","Data":"f669d0b849fb5e88bd395fe561fe14aaad2df0a890ae39c4930175c579c6c1f5"} Nov 22 08:11:35 crc kubenswrapper[4929]: I1122 08:11:35.980704 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9b4m8" podStartSLOduration=2.164268536 podStartE2EDuration="5.980683051s" podCreationTimestamp="2025-11-22 08:11:30 +0000 UTC" firstStartedPulling="2025-11-22 08:11:31.91453996 +0000 UTC m=+3629.023993983" lastFinishedPulling="2025-11-22 08:11:35.730954475 +0000 UTC m=+3632.840408498" observedRunningTime="2025-11-22 08:11:35.977343758 +0000 UTC m=+3633.086797781" watchObservedRunningTime="2025-11-22 08:11:35.980683051 +0000 UTC m=+3633.090137074" Nov 22 08:11:40 crc kubenswrapper[4929]: I1122 08:11:40.685577 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9b4m8" Nov 22 08:11:40 crc kubenswrapper[4929]: I1122 08:11:40.686179 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9b4m8" Nov 22 08:11:40 crc kubenswrapper[4929]: I1122 08:11:40.735548 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9b4m8" Nov 22 08:11:41 crc kubenswrapper[4929]: I1122 08:11:41.064106 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9b4m8" Nov 22 08:11:41 crc kubenswrapper[4929]: I1122 08:11:41.118857 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9b4m8"] Nov 22 08:11:43 crc kubenswrapper[4929]: I1122 08:11:43.028314 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-9b4m8" podUID="08f1c5c3-d032-477e-8fa4-08967e353598" containerName="registry-server" containerID="cri-o://f669d0b849fb5e88bd395fe561fe14aaad2df0a890ae39c4930175c579c6c1f5" gracePeriod=2 Nov 22 08:11:43 crc kubenswrapper[4929]: I1122 08:11:43.517754 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9b4m8" Nov 22 08:11:43 crc kubenswrapper[4929]: I1122 08:11:43.699142 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08f1c5c3-d032-477e-8fa4-08967e353598-utilities\") pod \"08f1c5c3-d032-477e-8fa4-08967e353598\" (UID: \"08f1c5c3-d032-477e-8fa4-08967e353598\") " Nov 22 08:11:43 crc kubenswrapper[4929]: I1122 08:11:43.699340 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmt6m\" (UniqueName: \"kubernetes.io/projected/08f1c5c3-d032-477e-8fa4-08967e353598-kube-api-access-fmt6m\") pod \"08f1c5c3-d032-477e-8fa4-08967e353598\" (UID: \"08f1c5c3-d032-477e-8fa4-08967e353598\") " Nov 22 08:11:43 crc kubenswrapper[4929]: I1122 08:11:43.699382 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08f1c5c3-d032-477e-8fa4-08967e353598-catalog-content\") pod \"08f1c5c3-d032-477e-8fa4-08967e353598\" (UID: \"08f1c5c3-d032-477e-8fa4-08967e353598\") " Nov 22 08:11:43 crc kubenswrapper[4929]: I1122 08:11:43.701109 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08f1c5c3-d032-477e-8fa4-08967e353598-utilities" (OuterVolumeSpecName: "utilities") pod "08f1c5c3-d032-477e-8fa4-08967e353598" (UID: "08f1c5c3-d032-477e-8fa4-08967e353598"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:11:43 crc kubenswrapper[4929]: I1122 08:11:43.709357 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08f1c5c3-d032-477e-8fa4-08967e353598-kube-api-access-fmt6m" (OuterVolumeSpecName: "kube-api-access-fmt6m") pod "08f1c5c3-d032-477e-8fa4-08967e353598" (UID: "08f1c5c3-d032-477e-8fa4-08967e353598"). InnerVolumeSpecName "kube-api-access-fmt6m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 08:11:43 crc kubenswrapper[4929]: I1122 08:11:43.763895 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08f1c5c3-d032-477e-8fa4-08967e353598-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "08f1c5c3-d032-477e-8fa4-08967e353598" (UID: "08f1c5c3-d032-477e-8fa4-08967e353598"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:11:43 crc kubenswrapper[4929]: I1122 08:11:43.802314 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08f1c5c3-d032-477e-8fa4-08967e353598-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 08:11:43 crc kubenswrapper[4929]: I1122 08:11:43.802349 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmt6m\" (UniqueName: \"kubernetes.io/projected/08f1c5c3-d032-477e-8fa4-08967e353598-kube-api-access-fmt6m\") on node \"crc\" DevicePath \"\"" Nov 22 08:11:43 crc kubenswrapper[4929]: I1122 08:11:43.802360 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08f1c5c3-d032-477e-8fa4-08967e353598-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 08:11:44 crc kubenswrapper[4929]: I1122 08:11:44.039493 4929 generic.go:334] "Generic (PLEG): container finished" podID="08f1c5c3-d032-477e-8fa4-08967e353598" containerID="f669d0b849fb5e88bd395fe561fe14aaad2df0a890ae39c4930175c579c6c1f5" exitCode=0 Nov 22 08:11:44 crc kubenswrapper[4929]: I1122 08:11:44.039533 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9b4m8" event={"ID":"08f1c5c3-d032-477e-8fa4-08967e353598","Type":"ContainerDied","Data":"f669d0b849fb5e88bd395fe561fe14aaad2df0a890ae39c4930175c579c6c1f5"} Nov 22 08:11:44 crc kubenswrapper[4929]: I1122 08:11:44.039553 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9b4m8" Nov 22 08:11:44 crc kubenswrapper[4929]: I1122 08:11:44.039567 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9b4m8" event={"ID":"08f1c5c3-d032-477e-8fa4-08967e353598","Type":"ContainerDied","Data":"345ced5290da6e90d44bfbf105cda539b5e5a4da8dbf60af0aea70adfdca70c3"} Nov 22 08:11:44 crc kubenswrapper[4929]: I1122 08:11:44.039635 4929 scope.go:117] "RemoveContainer" containerID="f669d0b849fb5e88bd395fe561fe14aaad2df0a890ae39c4930175c579c6c1f5" Nov 22 08:11:44 crc kubenswrapper[4929]: I1122 08:11:44.072670 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9b4m8"] Nov 22 08:11:44 crc kubenswrapper[4929]: I1122 08:11:44.078194 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-9b4m8"] Nov 22 08:11:44 crc kubenswrapper[4929]: I1122 08:11:44.079255 4929 scope.go:117] "RemoveContainer" containerID="49a4687c6fd48c083fcce80d08715d9b486303ba0cc41c7cbf4219e094b1fed9" Nov 22 08:11:44 crc kubenswrapper[4929]: I1122 08:11:44.098247 4929 scope.go:117] "RemoveContainer" containerID="ceacc839d56368db6a1268ba7e2382d71a59509001b1b115f78799d2de8e2656" Nov 22 08:11:44 crc kubenswrapper[4929]: I1122 08:11:44.149666 4929 scope.go:117] "RemoveContainer" containerID="f669d0b849fb5e88bd395fe561fe14aaad2df0a890ae39c4930175c579c6c1f5" Nov 22 08:11:44 crc kubenswrapper[4929]: E1122 08:11:44.150788 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f669d0b849fb5e88bd395fe561fe14aaad2df0a890ae39c4930175c579c6c1f5\": container with ID starting with f669d0b849fb5e88bd395fe561fe14aaad2df0a890ae39c4930175c579c6c1f5 not found: ID does not exist" containerID="f669d0b849fb5e88bd395fe561fe14aaad2df0a890ae39c4930175c579c6c1f5" Nov 22 08:11:44 crc kubenswrapper[4929]: I1122 08:11:44.150822 
4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f669d0b849fb5e88bd395fe561fe14aaad2df0a890ae39c4930175c579c6c1f5"} err="failed to get container status \"f669d0b849fb5e88bd395fe561fe14aaad2df0a890ae39c4930175c579c6c1f5\": rpc error: code = NotFound desc = could not find container \"f669d0b849fb5e88bd395fe561fe14aaad2df0a890ae39c4930175c579c6c1f5\": container with ID starting with f669d0b849fb5e88bd395fe561fe14aaad2df0a890ae39c4930175c579c6c1f5 not found: ID does not exist" Nov 22 08:11:44 crc kubenswrapper[4929]: I1122 08:11:44.150841 4929 scope.go:117] "RemoveContainer" containerID="49a4687c6fd48c083fcce80d08715d9b486303ba0cc41c7cbf4219e094b1fed9" Nov 22 08:11:44 crc kubenswrapper[4929]: E1122 08:11:44.153948 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49a4687c6fd48c083fcce80d08715d9b486303ba0cc41c7cbf4219e094b1fed9\": container with ID starting with 49a4687c6fd48c083fcce80d08715d9b486303ba0cc41c7cbf4219e094b1fed9 not found: ID does not exist" containerID="49a4687c6fd48c083fcce80d08715d9b486303ba0cc41c7cbf4219e094b1fed9" Nov 22 08:11:44 crc kubenswrapper[4929]: I1122 08:11:44.153972 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49a4687c6fd48c083fcce80d08715d9b486303ba0cc41c7cbf4219e094b1fed9"} err="failed to get container status \"49a4687c6fd48c083fcce80d08715d9b486303ba0cc41c7cbf4219e094b1fed9\": rpc error: code = NotFound desc = could not find container \"49a4687c6fd48c083fcce80d08715d9b486303ba0cc41c7cbf4219e094b1fed9\": container with ID starting with 49a4687c6fd48c083fcce80d08715d9b486303ba0cc41c7cbf4219e094b1fed9 not found: ID does not exist" Nov 22 08:11:44 crc kubenswrapper[4929]: I1122 08:11:44.153985 4929 scope.go:117] "RemoveContainer" containerID="ceacc839d56368db6a1268ba7e2382d71a59509001b1b115f78799d2de8e2656" Nov 22 08:11:44 crc kubenswrapper[4929]: E1122 08:11:44.154316 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ceacc839d56368db6a1268ba7e2382d71a59509001b1b115f78799d2de8e2656\": container with ID starting with ceacc839d56368db6a1268ba7e2382d71a59509001b1b115f78799d2de8e2656 not found: ID does not exist" containerID="ceacc839d56368db6a1268ba7e2382d71a59509001b1b115f78799d2de8e2656" Nov 22 08:11:44 crc kubenswrapper[4929]: I1122 08:11:44.154371 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ceacc839d56368db6a1268ba7e2382d71a59509001b1b115f78799d2de8e2656"} err="failed to get container status \"ceacc839d56368db6a1268ba7e2382d71a59509001b1b115f78799d2de8e2656\": rpc error: code = NotFound desc = could not find container \"ceacc839d56368db6a1268ba7e2382d71a59509001b1b115f78799d2de8e2656\": container with ID starting with ceacc839d56368db6a1268ba7e2382d71a59509001b1b115f78799d2de8e2656 not found: ID does not exist" Nov 22 08:11:45 crc kubenswrapper[4929]: I1122 08:11:45.963167 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08f1c5c3-d032-477e-8fa4-08967e353598" path="/var/lib/kubelet/pods/08f1c5c3-d032-477e-8fa4-08967e353598/volumes" Nov 22 08:11:48 crc kubenswrapper[4929]: I1122 08:11:48.594146 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 08:11:48 crc kubenswrapper[4929]: I1122 08:11:48.594467 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 08:12:18 crc kubenswrapper[4929]: I1122 08:12:18.594864 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 08:12:18 crc kubenswrapper[4929]: I1122 08:12:18.595523 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 08:12:18 crc kubenswrapper[4929]: I1122 08:12:18.595570 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 08:12:18 crc kubenswrapper[4929]: I1122 08:12:18.596512 4929 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ef7c385702fe51dea117fafc786255623c312f2fcfd89279145635a875374763"} pod="openshift-machine-config-operator/machine-config-daemon-dssfx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 08:12:18 crc kubenswrapper[4929]: I1122 08:12:18.596585 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" containerID="cri-o://ef7c385702fe51dea117fafc786255623c312f2fcfd89279145635a875374763" gracePeriod=600 Nov 22 08:12:19 crc kubenswrapper[4929]: I1122 08:12:19.366138 4929 generic.go:334] "Generic (PLEG): container finished" podID="470531cb-120c-48d9-80e1-adf074cf3055" containerID="ef7c385702fe51dea117fafc786255623c312f2fcfd89279145635a875374763" exitCode=0 Nov 22 08:12:19 crc kubenswrapper[4929]: I1122 08:12:19.366342 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerDied","Data":"ef7c385702fe51dea117fafc786255623c312f2fcfd89279145635a875374763"} Nov 22 08:12:19 crc kubenswrapper[4929]: I1122 08:12:19.367032 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f"} Nov 22 08:12:19 crc kubenswrapper[4929]: I1122 08:12:19.367131 4929 scope.go:117] "RemoveContainer" containerID="df6f2ec6a421cc40a342ddbb72231e35185bff8550eed50c79e23d1385733531" Nov 22 08:12:21 crc kubenswrapper[4929]: I1122 08:12:21.848652 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-47mt9"] Nov 22 08:12:21 crc kubenswrapper[4929]: 
E1122 08:12:21.849756 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08f1c5c3-d032-477e-8fa4-08967e353598" containerName="registry-server" Nov 22 08:12:21 crc kubenswrapper[4929]: I1122 08:12:21.849775 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="08f1c5c3-d032-477e-8fa4-08967e353598" containerName="registry-server" Nov 22 08:12:21 crc kubenswrapper[4929]: E1122 08:12:21.849801 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08f1c5c3-d032-477e-8fa4-08967e353598" containerName="extract-utilities" Nov 22 08:12:21 crc kubenswrapper[4929]: I1122 08:12:21.849810 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="08f1c5c3-d032-477e-8fa4-08967e353598" containerName="extract-utilities" Nov 22 08:12:21 crc kubenswrapper[4929]: E1122 08:12:21.849821 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08f1c5c3-d032-477e-8fa4-08967e353598" containerName="extract-content" Nov 22 08:12:21 crc kubenswrapper[4929]: I1122 08:12:21.849829 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="08f1c5c3-d032-477e-8fa4-08967e353598" containerName="extract-content" Nov 22 08:12:21 crc kubenswrapper[4929]: I1122 08:12:21.850136 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="08f1c5c3-d032-477e-8fa4-08967e353598" containerName="registry-server" Nov 22 08:12:21 crc kubenswrapper[4929]: I1122 08:12:21.852130 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-47mt9" Nov 22 08:12:21 crc kubenswrapper[4929]: I1122 08:12:21.864105 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-47mt9"] Nov 22 08:12:21 crc kubenswrapper[4929]: I1122 08:12:21.988624 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57f6b16a-dd67-4be4-a38d-101934de5c43-catalog-content\") pod \"redhat-operators-47mt9\" (UID: \"57f6b16a-dd67-4be4-a38d-101934de5c43\") " pod="openshift-marketplace/redhat-operators-47mt9" Nov 22 08:12:21 crc kubenswrapper[4929]: I1122 08:12:21.988687 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9qcw\" (UniqueName: \"kubernetes.io/projected/57f6b16a-dd67-4be4-a38d-101934de5c43-kube-api-access-n9qcw\") pod \"redhat-operators-47mt9\" (UID: \"57f6b16a-dd67-4be4-a38d-101934de5c43\") " pod="openshift-marketplace/redhat-operators-47mt9" Nov 22 08:12:21 crc kubenswrapper[4929]: I1122 08:12:21.988805 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57f6b16a-dd67-4be4-a38d-101934de5c43-utilities\") pod \"redhat-operators-47mt9\" (UID: \"57f6b16a-dd67-4be4-a38d-101934de5c43\") " pod="openshift-marketplace/redhat-operators-47mt9" Nov 22 08:12:22 crc kubenswrapper[4929]: I1122 08:12:22.090795 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57f6b16a-dd67-4be4-a38d-101934de5c43-catalog-content\") pod \"redhat-operators-47mt9\" (UID: \"57f6b16a-dd67-4be4-a38d-101934de5c43\") " pod="openshift-marketplace/redhat-operators-47mt9" Nov 22 08:12:22 crc kubenswrapper[4929]: I1122 08:12:22.090875 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9qcw\" (UniqueName: 
\"kubernetes.io/projected/57f6b16a-dd67-4be4-a38d-101934de5c43-kube-api-access-n9qcw\") pod \"redhat-operators-47mt9\" (UID: \"57f6b16a-dd67-4be4-a38d-101934de5c43\") " pod="openshift-marketplace/redhat-operators-47mt9" Nov 22 08:12:22 crc kubenswrapper[4929]: I1122 08:12:22.090981 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57f6b16a-dd67-4be4-a38d-101934de5c43-utilities\") pod \"redhat-operators-47mt9\" (UID: \"57f6b16a-dd67-4be4-a38d-101934de5c43\") " pod="openshift-marketplace/redhat-operators-47mt9" Nov 22 08:12:22 crc kubenswrapper[4929]: I1122 08:12:22.091275 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57f6b16a-dd67-4be4-a38d-101934de5c43-catalog-content\") pod \"redhat-operators-47mt9\" (UID: \"57f6b16a-dd67-4be4-a38d-101934de5c43\") " pod="openshift-marketplace/redhat-operators-47mt9" Nov 22 08:12:22 crc kubenswrapper[4929]: I1122 08:12:22.091529 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57f6b16a-dd67-4be4-a38d-101934de5c43-utilities\") pod \"redhat-operators-47mt9\" (UID: \"57f6b16a-dd67-4be4-a38d-101934de5c43\") " pod="openshift-marketplace/redhat-operators-47mt9" Nov 22 08:12:22 crc kubenswrapper[4929]: I1122 08:12:22.111177 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9qcw\" (UniqueName: \"kubernetes.io/projected/57f6b16a-dd67-4be4-a38d-101934de5c43-kube-api-access-n9qcw\") pod \"redhat-operators-47mt9\" (UID: \"57f6b16a-dd67-4be4-a38d-101934de5c43\") " pod="openshift-marketplace/redhat-operators-47mt9" Nov 22 08:12:22 crc kubenswrapper[4929]: I1122 08:12:22.179341 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-47mt9" Nov 22 08:12:22 crc kubenswrapper[4929]: I1122 08:12:22.671121 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-47mt9"] Nov 22 08:12:22 crc kubenswrapper[4929]: W1122 08:12:22.672425 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod57f6b16a_dd67_4be4_a38d_101934de5c43.slice/crio-1e8b257f64531ac19d5410da7ffccfd9c7b86904f140890254a756cb44bb0511 WatchSource:0}: Error finding container 1e8b257f64531ac19d5410da7ffccfd9c7b86904f140890254a756cb44bb0511: Status 404 returned error can't find the container with id 1e8b257f64531ac19d5410da7ffccfd9c7b86904f140890254a756cb44bb0511 Nov 22 08:12:23 crc kubenswrapper[4929]: I1122 08:12:23.419075 4929 generic.go:334] "Generic (PLEG): container finished" podID="57f6b16a-dd67-4be4-a38d-101934de5c43" containerID="84069f60948bebc11ea51b3f6805e92f670ca7612391669e7bdc415f27322a3a" exitCode=0 Nov 22 08:12:23 crc kubenswrapper[4929]: I1122 08:12:23.419130 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-47mt9" event={"ID":"57f6b16a-dd67-4be4-a38d-101934de5c43","Type":"ContainerDied","Data":"84069f60948bebc11ea51b3f6805e92f670ca7612391669e7bdc415f27322a3a"} Nov 22 08:12:23 crc kubenswrapper[4929]: I1122 08:12:23.419398 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-47mt9" event={"ID":"57f6b16a-dd67-4be4-a38d-101934de5c43","Type":"ContainerStarted","Data":"1e8b257f64531ac19d5410da7ffccfd9c7b86904f140890254a756cb44bb0511"} Nov 22 08:12:24 crc kubenswrapper[4929]: I1122 08:12:24.430603 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-47mt9" event={"ID":"57f6b16a-dd67-4be4-a38d-101934de5c43","Type":"ContainerStarted","Data":"2f1b71acc7f6a1c0d154ebdf01b48d05fc907763348d682a6f725762f6a3ef62"} Nov 22 08:12:25 crc kubenswrapper[4929]: I1122 08:12:25.441984 4929 generic.go:334] "Generic (PLEG): container finished" podID="57f6b16a-dd67-4be4-a38d-101934de5c43" containerID="2f1b71acc7f6a1c0d154ebdf01b48d05fc907763348d682a6f725762f6a3ef62" exitCode=0 Nov 22 08:12:25 crc kubenswrapper[4929]: I1122 08:12:25.442112 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-47mt9" event={"ID":"57f6b16a-dd67-4be4-a38d-101934de5c43","Type":"ContainerDied","Data":"2f1b71acc7f6a1c0d154ebdf01b48d05fc907763348d682a6f725762f6a3ef62"} Nov 22 08:12:27 crc kubenswrapper[4929]: I1122 08:12:27.462346 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-47mt9" event={"ID":"57f6b16a-dd67-4be4-a38d-101934de5c43","Type":"ContainerStarted","Data":"89d90ab83572c4c99204116704b135b69b7087eb816f013818a5a5cf0a61538f"} Nov 22 08:12:27 crc kubenswrapper[4929]: I1122 08:12:27.483472 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-47mt9" podStartSLOduration=3.5193507840000002 podStartE2EDuration="6.483450079s" podCreationTimestamp="2025-11-22 08:12:21 +0000 UTC" firstStartedPulling="2025-11-22 08:12:23.420950408 +0000 UTC m=+3680.530404421" lastFinishedPulling="2025-11-22 08:12:26.385049693 +0000 UTC m=+3683.494503716" observedRunningTime="2025-11-22 08:12:27.480998628 +0000 UTC m=+3684.590452641" watchObservedRunningTime="2025-11-22 08:12:27.483450079 +0000 UTC m=+3684.592904092" Nov 22 08:12:32 crc 
kubenswrapper[4929]: I1122 08:12:32.180871 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-47mt9" Nov 22 08:12:32 crc kubenswrapper[4929]: I1122 08:12:32.181484 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-47mt9" Nov 22 08:12:32 crc kubenswrapper[4929]: I1122 08:12:32.261340 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-47mt9" Nov 22 08:12:32 crc kubenswrapper[4929]: I1122 08:12:32.509416 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qnk76"] Nov 22 08:12:32 crc kubenswrapper[4929]: I1122 08:12:32.512487 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qnk76" Nov 22 08:12:32 crc kubenswrapper[4929]: I1122 08:12:32.527384 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qnk76"] Nov 22 08:12:32 crc kubenswrapper[4929]: I1122 08:12:32.561684 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-47mt9" Nov 22 08:12:32 crc kubenswrapper[4929]: I1122 08:12:32.599235 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22250678-650c-4258-a09e-854bdf010668-catalog-content\") pod \"redhat-marketplace-qnk76\" (UID: \"22250678-650c-4258-a09e-854bdf010668\") " pod="openshift-marketplace/redhat-marketplace-qnk76" Nov 22 08:12:32 crc kubenswrapper[4929]: I1122 08:12:32.599321 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mz7r8\" (UniqueName: \"kubernetes.io/projected/22250678-650c-4258-a09e-854bdf010668-kube-api-access-mz7r8\") pod \"redhat-marketplace-qnk76\" (UID: \"22250678-650c-4258-a09e-854bdf010668\") " pod="openshift-marketplace/redhat-marketplace-qnk76" Nov 22 08:12:32 crc kubenswrapper[4929]: I1122 08:12:32.599457 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22250678-650c-4258-a09e-854bdf010668-utilities\") pod \"redhat-marketplace-qnk76\" (UID: \"22250678-650c-4258-a09e-854bdf010668\") " pod="openshift-marketplace/redhat-marketplace-qnk76" Nov 22 08:12:32 crc kubenswrapper[4929]: I1122 08:12:32.701746 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22250678-650c-4258-a09e-854bdf010668-utilities\") pod \"redhat-marketplace-qnk76\" (UID: \"22250678-650c-4258-a09e-854bdf010668\") " pod="openshift-marketplace/redhat-marketplace-qnk76" Nov 22 08:12:32 crc kubenswrapper[4929]: I1122 08:12:32.701872 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22250678-650c-4258-a09e-854bdf010668-catalog-content\") pod \"redhat-marketplace-qnk76\" (UID: \"22250678-650c-4258-a09e-854bdf010668\") " pod="openshift-marketplace/redhat-marketplace-qnk76" Nov 22 08:12:32 crc kubenswrapper[4929]: I1122 08:12:32.701940 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mz7r8\" (UniqueName: \"kubernetes.io/projected/22250678-650c-4258-a09e-854bdf010668-kube-api-access-mz7r8\") 
pod \"redhat-marketplace-qnk76\" (UID: \"22250678-650c-4258-a09e-854bdf010668\") " pod="openshift-marketplace/redhat-marketplace-qnk76" Nov 22 08:12:32 crc kubenswrapper[4929]: I1122 08:12:32.702304 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22250678-650c-4258-a09e-854bdf010668-utilities\") pod \"redhat-marketplace-qnk76\" (UID: \"22250678-650c-4258-a09e-854bdf010668\") " pod="openshift-marketplace/redhat-marketplace-qnk76" Nov 22 08:12:32 crc kubenswrapper[4929]: I1122 08:12:32.702394 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22250678-650c-4258-a09e-854bdf010668-catalog-content\") pod \"redhat-marketplace-qnk76\" (UID: \"22250678-650c-4258-a09e-854bdf010668\") " pod="openshift-marketplace/redhat-marketplace-qnk76" Nov 22 08:12:32 crc kubenswrapper[4929]: I1122 08:12:32.722601 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mz7r8\" (UniqueName: \"kubernetes.io/projected/22250678-650c-4258-a09e-854bdf010668-kube-api-access-mz7r8\") pod \"redhat-marketplace-qnk76\" (UID: \"22250678-650c-4258-a09e-854bdf010668\") " pod="openshift-marketplace/redhat-marketplace-qnk76" Nov 22 08:12:32 crc kubenswrapper[4929]: I1122 08:12:32.837272 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qnk76" Nov 22 08:12:33 crc kubenswrapper[4929]: I1122 08:12:33.300553 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qnk76"] Nov 22 08:12:33 crc kubenswrapper[4929]: W1122 08:12:33.307189 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod22250678_650c_4258_a09e_854bdf010668.slice/crio-b21856e102491f216f5f3955c154556eee4966e4ee047d206e923cb943549691 WatchSource:0}: Error finding container b21856e102491f216f5f3955c154556eee4966e4ee047d206e923cb943549691: Status 404 returned error can't find the container with id b21856e102491f216f5f3955c154556eee4966e4ee047d206e923cb943549691 Nov 22 08:12:33 crc kubenswrapper[4929]: I1122 08:12:33.512995 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qnk76" event={"ID":"22250678-650c-4258-a09e-854bdf010668","Type":"ContainerStarted","Data":"b21856e102491f216f5f3955c154556eee4966e4ee047d206e923cb943549691"} Nov 22 08:12:34 crc kubenswrapper[4929]: I1122 08:12:34.524048 4929 generic.go:334] "Generic (PLEG): container finished" podID="22250678-650c-4258-a09e-854bdf010668" containerID="b1e8634e3903b9c6cd32dcf11570c58740860cda922c6b17bcbd8b56cb031948" exitCode=0 Nov 22 08:12:34 crc kubenswrapper[4929]: I1122 08:12:34.524112 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qnk76" event={"ID":"22250678-650c-4258-a09e-854bdf010668","Type":"ContainerDied","Data":"b1e8634e3903b9c6cd32dcf11570c58740860cda922c6b17bcbd8b56cb031948"} Nov 22 08:12:34 crc kubenswrapper[4929]: I1122 08:12:34.904259 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-47mt9"] Nov 22 08:12:34 crc kubenswrapper[4929]: I1122 08:12:34.904521 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-47mt9" podUID="57f6b16a-dd67-4be4-a38d-101934de5c43" containerName="registry-server" 
containerID="cri-o://89d90ab83572c4c99204116704b135b69b7087eb816f013818a5a5cf0a61538f" gracePeriod=2 Nov 22 08:12:36 crc kubenswrapper[4929]: I1122 08:12:36.557325 4929 generic.go:334] "Generic (PLEG): container finished" podID="57f6b16a-dd67-4be4-a38d-101934de5c43" containerID="89d90ab83572c4c99204116704b135b69b7087eb816f013818a5a5cf0a61538f" exitCode=0 Nov 22 08:12:36 crc kubenswrapper[4929]: I1122 08:12:36.557396 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-47mt9" event={"ID":"57f6b16a-dd67-4be4-a38d-101934de5c43","Type":"ContainerDied","Data":"89d90ab83572c4c99204116704b135b69b7087eb816f013818a5a5cf0a61538f"} Nov 22 08:12:37 crc kubenswrapper[4929]: I1122 08:12:37.443606 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-47mt9" Nov 22 08:12:37 crc kubenswrapper[4929]: I1122 08:12:37.518003 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57f6b16a-dd67-4be4-a38d-101934de5c43-catalog-content\") pod \"57f6b16a-dd67-4be4-a38d-101934de5c43\" (UID: \"57f6b16a-dd67-4be4-a38d-101934de5c43\") " Nov 22 08:12:37 crc kubenswrapper[4929]: I1122 08:12:37.518076 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9qcw\" (UniqueName: \"kubernetes.io/projected/57f6b16a-dd67-4be4-a38d-101934de5c43-kube-api-access-n9qcw\") pod \"57f6b16a-dd67-4be4-a38d-101934de5c43\" (UID: \"57f6b16a-dd67-4be4-a38d-101934de5c43\") " Nov 22 08:12:37 crc kubenswrapper[4929]: I1122 08:12:37.518329 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57f6b16a-dd67-4be4-a38d-101934de5c43-utilities\") pod \"57f6b16a-dd67-4be4-a38d-101934de5c43\" (UID: \"57f6b16a-dd67-4be4-a38d-101934de5c43\") " Nov 22 08:12:37 crc kubenswrapper[4929]: I1122 08:12:37.519792 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57f6b16a-dd67-4be4-a38d-101934de5c43-utilities" (OuterVolumeSpecName: "utilities") pod "57f6b16a-dd67-4be4-a38d-101934de5c43" (UID: "57f6b16a-dd67-4be4-a38d-101934de5c43"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:12:37 crc kubenswrapper[4929]: I1122 08:12:37.525456 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57f6b16a-dd67-4be4-a38d-101934de5c43-kube-api-access-n9qcw" (OuterVolumeSpecName: "kube-api-access-n9qcw") pod "57f6b16a-dd67-4be4-a38d-101934de5c43" (UID: "57f6b16a-dd67-4be4-a38d-101934de5c43"). InnerVolumeSpecName "kube-api-access-n9qcw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 08:12:37 crc kubenswrapper[4929]: I1122 08:12:37.573016 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-47mt9" event={"ID":"57f6b16a-dd67-4be4-a38d-101934de5c43","Type":"ContainerDied","Data":"1e8b257f64531ac19d5410da7ffccfd9c7b86904f140890254a756cb44bb0511"} Nov 22 08:12:37 crc kubenswrapper[4929]: I1122 08:12:37.573077 4929 scope.go:117] "RemoveContainer" containerID="89d90ab83572c4c99204116704b135b69b7087eb816f013818a5a5cf0a61538f" Nov 22 08:12:37 crc kubenswrapper[4929]: I1122 08:12:37.573075 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-47mt9" Nov 22 08:12:37 crc kubenswrapper[4929]: I1122 08:12:37.621031 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9qcw\" (UniqueName: \"kubernetes.io/projected/57f6b16a-dd67-4be4-a38d-101934de5c43-kube-api-access-n9qcw\") on node \"crc\" DevicePath \"\"" Nov 22 08:12:37 crc kubenswrapper[4929]: I1122 08:12:37.621068 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57f6b16a-dd67-4be4-a38d-101934de5c43-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 08:12:37 crc kubenswrapper[4929]: I1122 08:12:37.768136 4929 scope.go:117] "RemoveContainer" containerID="2f1b71acc7f6a1c0d154ebdf01b48d05fc907763348d682a6f725762f6a3ef62" Nov 22 08:12:37 crc kubenswrapper[4929]: I1122 08:12:37.833525 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57f6b16a-dd67-4be4-a38d-101934de5c43-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57f6b16a-dd67-4be4-a38d-101934de5c43" (UID: "57f6b16a-dd67-4be4-a38d-101934de5c43"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:12:37 crc kubenswrapper[4929]: I1122 08:12:37.929265 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57f6b16a-dd67-4be4-a38d-101934de5c43-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 08:12:37 crc kubenswrapper[4929]: I1122 08:12:37.964219 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-47mt9"] Nov 22 08:12:37 crc kubenswrapper[4929]: I1122 08:12:37.970604 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-47mt9"] Nov 22 08:12:38 crc kubenswrapper[4929]: I1122 08:12:38.007550 4929 scope.go:117] "RemoveContainer" containerID="84069f60948bebc11ea51b3f6805e92f670ca7612391669e7bdc415f27322a3a" Nov 22 08:12:38 crc kubenswrapper[4929]: I1122 08:12:38.587568 4929 generic.go:334] "Generic (PLEG): container finished" podID="22250678-650c-4258-a09e-854bdf010668" containerID="e5061575b667a9f4b12928fc562c9ce13f54bac3b6bca83ce6d1ab7ec3ae7668" exitCode=0 Nov 22 08:12:38 crc kubenswrapper[4929]: I1122 08:12:38.587624 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qnk76" event={"ID":"22250678-650c-4258-a09e-854bdf010668","Type":"ContainerDied","Data":"e5061575b667a9f4b12928fc562c9ce13f54bac3b6bca83ce6d1ab7ec3ae7668"} Nov 22 08:12:39 crc kubenswrapper[4929]: I1122 08:12:39.604768 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qnk76" event={"ID":"22250678-650c-4258-a09e-854bdf010668","Type":"ContainerStarted","Data":"2a4d6a6e6036b4742a08c2dcc01771b9f5bcbdba912e1dbbf37fb11aaca13eb9"} Nov 22 08:12:39 crc kubenswrapper[4929]: I1122 08:12:39.626761 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qnk76" podStartSLOduration=2.919531719 podStartE2EDuration="7.626737242s" podCreationTimestamp="2025-11-22 08:12:32 +0000 UTC" firstStartedPulling="2025-11-22 08:12:34.527549918 +0000 UTC m=+3691.637003931" lastFinishedPulling="2025-11-22 08:12:39.234755441 +0000 UTC m=+3696.344209454" observedRunningTime="2025-11-22 08:12:39.623533063 +0000 UTC m=+3696.732987096" watchObservedRunningTime="2025-11-22 08:12:39.626737242 +0000 UTC 
m=+3696.736191255" Nov 22 08:12:39 crc kubenswrapper[4929]: I1122 08:12:39.960467 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57f6b16a-dd67-4be4-a38d-101934de5c43" path="/var/lib/kubelet/pods/57f6b16a-dd67-4be4-a38d-101934de5c43/volumes" Nov 22 08:12:42 crc kubenswrapper[4929]: I1122 08:12:42.837724 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qnk76" Nov 22 08:12:42 crc kubenswrapper[4929]: I1122 08:12:42.838373 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qnk76" Nov 22 08:12:42 crc kubenswrapper[4929]: I1122 08:12:42.893777 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qnk76" Nov 22 08:12:52 crc kubenswrapper[4929]: I1122 08:12:52.881341 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qnk76" Nov 22 08:12:52 crc kubenswrapper[4929]: I1122 08:12:52.934856 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qnk76"] Nov 22 08:12:53 crc kubenswrapper[4929]: I1122 08:12:53.745274 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qnk76" podUID="22250678-650c-4258-a09e-854bdf010668" containerName="registry-server" containerID="cri-o://2a4d6a6e6036b4742a08c2dcc01771b9f5bcbdba912e1dbbf37fb11aaca13eb9" gracePeriod=2 Nov 22 08:12:53 crc kubenswrapper[4929]: E1122 08:12:53.892588 4929 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod22250678_650c_4258_a09e_854bdf010668.slice/crio-2a4d6a6e6036b4742a08c2dcc01771b9f5bcbdba912e1dbbf37fb11aaca13eb9.scope\": RecentStats: unable to find data in memory cache]" Nov 22 08:12:54 crc kubenswrapper[4929]: I1122 08:12:54.783670 4929 generic.go:334] "Generic (PLEG): container finished" podID="22250678-650c-4258-a09e-854bdf010668" containerID="2a4d6a6e6036b4742a08c2dcc01771b9f5bcbdba912e1dbbf37fb11aaca13eb9" exitCode=0 Nov 22 08:12:54 crc kubenswrapper[4929]: I1122 08:12:54.783726 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qnk76" event={"ID":"22250678-650c-4258-a09e-854bdf010668","Type":"ContainerDied","Data":"2a4d6a6e6036b4742a08c2dcc01771b9f5bcbdba912e1dbbf37fb11aaca13eb9"} Nov 22 08:12:55 crc kubenswrapper[4929]: I1122 08:12:55.170862 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qnk76" Nov 22 08:12:55 crc kubenswrapper[4929]: I1122 08:12:55.282061 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22250678-650c-4258-a09e-854bdf010668-catalog-content\") pod \"22250678-650c-4258-a09e-854bdf010668\" (UID: \"22250678-650c-4258-a09e-854bdf010668\") " Nov 22 08:12:55 crc kubenswrapper[4929]: I1122 08:12:55.282114 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mz7r8\" (UniqueName: \"kubernetes.io/projected/22250678-650c-4258-a09e-854bdf010668-kube-api-access-mz7r8\") pod \"22250678-650c-4258-a09e-854bdf010668\" (UID: \"22250678-650c-4258-a09e-854bdf010668\") " Nov 22 08:12:55 crc kubenswrapper[4929]: I1122 08:12:55.282237 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22250678-650c-4258-a09e-854bdf010668-utilities\") pod \"22250678-650c-4258-a09e-854bdf010668\" (UID: \"22250678-650c-4258-a09e-854bdf010668\") " Nov 22 08:12:55 crc kubenswrapper[4929]: I1122 08:12:55.282980 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/22250678-650c-4258-a09e-854bdf010668-utilities" (OuterVolumeSpecName: "utilities") pod "22250678-650c-4258-a09e-854bdf010668" (UID: "22250678-650c-4258-a09e-854bdf010668"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:12:55 crc kubenswrapper[4929]: I1122 08:12:55.289060 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22250678-650c-4258-a09e-854bdf010668-kube-api-access-mz7r8" (OuterVolumeSpecName: "kube-api-access-mz7r8") pod "22250678-650c-4258-a09e-854bdf010668" (UID: "22250678-650c-4258-a09e-854bdf010668"). InnerVolumeSpecName "kube-api-access-mz7r8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 08:12:55 crc kubenswrapper[4929]: I1122 08:12:55.304404 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/22250678-650c-4258-a09e-854bdf010668-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "22250678-650c-4258-a09e-854bdf010668" (UID: "22250678-650c-4258-a09e-854bdf010668"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:12:55 crc kubenswrapper[4929]: I1122 08:12:55.384072 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22250678-650c-4258-a09e-854bdf010668-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 08:12:55 crc kubenswrapper[4929]: I1122 08:12:55.384287 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mz7r8\" (UniqueName: \"kubernetes.io/projected/22250678-650c-4258-a09e-854bdf010668-kube-api-access-mz7r8\") on node \"crc\" DevicePath \"\"" Nov 22 08:12:55 crc kubenswrapper[4929]: I1122 08:12:55.384608 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22250678-650c-4258-a09e-854bdf010668-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 08:12:55 crc kubenswrapper[4929]: I1122 08:12:55.803741 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qnk76" event={"ID":"22250678-650c-4258-a09e-854bdf010668","Type":"ContainerDied","Data":"b21856e102491f216f5f3955c154556eee4966e4ee047d206e923cb943549691"} Nov 22 08:12:55 crc kubenswrapper[4929]: I1122 08:12:55.803828 4929 scope.go:117] "RemoveContainer" containerID="2a4d6a6e6036b4742a08c2dcc01771b9f5bcbdba912e1dbbf37fb11aaca13eb9" Nov 22 08:12:55 crc kubenswrapper[4929]: I1122 08:12:55.803833 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qnk76" Nov 22 08:12:55 crc kubenswrapper[4929]: I1122 08:12:55.831154 4929 scope.go:117] "RemoveContainer" containerID="e5061575b667a9f4b12928fc562c9ce13f54bac3b6bca83ce6d1ab7ec3ae7668" Nov 22 08:12:55 crc kubenswrapper[4929]: I1122 08:12:55.853048 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qnk76"] Nov 22 08:12:55 crc kubenswrapper[4929]: I1122 08:12:55.864050 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qnk76"] Nov 22 08:12:55 crc kubenswrapper[4929]: I1122 08:12:55.870568 4929 scope.go:117] "RemoveContainer" containerID="b1e8634e3903b9c6cd32dcf11570c58740860cda922c6b17bcbd8b56cb031948" Nov 22 08:12:55 crc kubenswrapper[4929]: I1122 08:12:55.958655 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22250678-650c-4258-a09e-854bdf010668" path="/var/lib/kubelet/pods/22250678-650c-4258-a09e-854bdf010668/volumes" Nov 22 08:14:18 crc kubenswrapper[4929]: I1122 08:14:18.595241 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 08:14:18 crc kubenswrapper[4929]: I1122 08:14:18.596388 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 08:14:48 crc kubenswrapper[4929]: I1122 08:14:48.594813 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial 
tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 08:14:48 crc kubenswrapper[4929]: I1122 08:14:48.595572 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.158433 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396655-vqrbg"] Nov 22 08:15:00 crc kubenswrapper[4929]: E1122 08:15:00.159460 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57f6b16a-dd67-4be4-a38d-101934de5c43" containerName="extract-content" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.159477 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="57f6b16a-dd67-4be4-a38d-101934de5c43" containerName="extract-content" Nov 22 08:15:00 crc kubenswrapper[4929]: E1122 08:15:00.159508 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22250678-650c-4258-a09e-854bdf010668" containerName="extract-content" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.159517 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="22250678-650c-4258-a09e-854bdf010668" containerName="extract-content" Nov 22 08:15:00 crc kubenswrapper[4929]: E1122 08:15:00.159554 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57f6b16a-dd67-4be4-a38d-101934de5c43" containerName="extract-utilities" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.159563 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="57f6b16a-dd67-4be4-a38d-101934de5c43" containerName="extract-utilities" Nov 22 08:15:00 crc kubenswrapper[4929]: E1122 08:15:00.159576 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22250678-650c-4258-a09e-854bdf010668" containerName="extract-utilities" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.159583 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="22250678-650c-4258-a09e-854bdf010668" containerName="extract-utilities" Nov 22 08:15:00 crc kubenswrapper[4929]: E1122 08:15:00.159594 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57f6b16a-dd67-4be4-a38d-101934de5c43" containerName="registry-server" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.159602 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="57f6b16a-dd67-4be4-a38d-101934de5c43" containerName="registry-server" Nov 22 08:15:00 crc kubenswrapper[4929]: E1122 08:15:00.159616 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22250678-650c-4258-a09e-854bdf010668" containerName="registry-server" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.159624 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="22250678-650c-4258-a09e-854bdf010668" containerName="registry-server" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.159857 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="22250678-650c-4258-a09e-854bdf010668" containerName="registry-server" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.159873 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="57f6b16a-dd67-4be4-a38d-101934de5c43" containerName="registry-server" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.160781 4929 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396655-vqrbg" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.163374 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.178450 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396655-vqrbg"] Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.184697 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.319622 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/45abdf09-4f43-4d10-a0b0-ca8ad230f639-config-volume\") pod \"collect-profiles-29396655-vqrbg\" (UID: \"45abdf09-4f43-4d10-a0b0-ca8ad230f639\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396655-vqrbg" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.320165 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6k6m\" (UniqueName: \"kubernetes.io/projected/45abdf09-4f43-4d10-a0b0-ca8ad230f639-kube-api-access-g6k6m\") pod \"collect-profiles-29396655-vqrbg\" (UID: \"45abdf09-4f43-4d10-a0b0-ca8ad230f639\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396655-vqrbg" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.320282 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/45abdf09-4f43-4d10-a0b0-ca8ad230f639-secret-volume\") pod \"collect-profiles-29396655-vqrbg\" (UID: \"45abdf09-4f43-4d10-a0b0-ca8ad230f639\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396655-vqrbg" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.422708 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/45abdf09-4f43-4d10-a0b0-ca8ad230f639-secret-volume\") pod \"collect-profiles-29396655-vqrbg\" (UID: \"45abdf09-4f43-4d10-a0b0-ca8ad230f639\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396655-vqrbg" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.422856 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/45abdf09-4f43-4d10-a0b0-ca8ad230f639-config-volume\") pod \"collect-profiles-29396655-vqrbg\" (UID: \"45abdf09-4f43-4d10-a0b0-ca8ad230f639\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396655-vqrbg" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.422887 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6k6m\" (UniqueName: \"kubernetes.io/projected/45abdf09-4f43-4d10-a0b0-ca8ad230f639-kube-api-access-g6k6m\") pod \"collect-profiles-29396655-vqrbg\" (UID: \"45abdf09-4f43-4d10-a0b0-ca8ad230f639\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396655-vqrbg" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.425203 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/45abdf09-4f43-4d10-a0b0-ca8ad230f639-config-volume\") 
pod \"collect-profiles-29396655-vqrbg\" (UID: \"45abdf09-4f43-4d10-a0b0-ca8ad230f639\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396655-vqrbg" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.432551 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/45abdf09-4f43-4d10-a0b0-ca8ad230f639-secret-volume\") pod \"collect-profiles-29396655-vqrbg\" (UID: \"45abdf09-4f43-4d10-a0b0-ca8ad230f639\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396655-vqrbg" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.442018 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6k6m\" (UniqueName: \"kubernetes.io/projected/45abdf09-4f43-4d10-a0b0-ca8ad230f639-kube-api-access-g6k6m\") pod \"collect-profiles-29396655-vqrbg\" (UID: \"45abdf09-4f43-4d10-a0b0-ca8ad230f639\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396655-vqrbg" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.502758 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396655-vqrbg" Nov 22 08:15:00 crc kubenswrapper[4929]: I1122 08:15:00.963264 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396655-vqrbg"] Nov 22 08:15:01 crc kubenswrapper[4929]: I1122 08:15:01.086648 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396655-vqrbg" event={"ID":"45abdf09-4f43-4d10-a0b0-ca8ad230f639","Type":"ContainerStarted","Data":"c70540bbc525d04d7c8a75c23163761a21cc20227a5f76c9d8d273524fb4a9f2"} Nov 22 08:15:02 crc kubenswrapper[4929]: I1122 08:15:02.097735 4929 generic.go:334] "Generic (PLEG): container finished" podID="45abdf09-4f43-4d10-a0b0-ca8ad230f639" containerID="b2d68474533abcb44991aa7392adbae512cafdf99416f86b73a89b9b2ea318e1" exitCode=0 Nov 22 08:15:02 crc kubenswrapper[4929]: I1122 08:15:02.097869 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396655-vqrbg" event={"ID":"45abdf09-4f43-4d10-a0b0-ca8ad230f639","Type":"ContainerDied","Data":"b2d68474533abcb44991aa7392adbae512cafdf99416f86b73a89b9b2ea318e1"} Nov 22 08:15:03 crc kubenswrapper[4929]: I1122 08:15:03.434848 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396655-vqrbg" Nov 22 08:15:03 crc kubenswrapper[4929]: I1122 08:15:03.596653 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/45abdf09-4f43-4d10-a0b0-ca8ad230f639-config-volume\") pod \"45abdf09-4f43-4d10-a0b0-ca8ad230f639\" (UID: \"45abdf09-4f43-4d10-a0b0-ca8ad230f639\") " Nov 22 08:15:03 crc kubenswrapper[4929]: I1122 08:15:03.596952 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/45abdf09-4f43-4d10-a0b0-ca8ad230f639-secret-volume\") pod \"45abdf09-4f43-4d10-a0b0-ca8ad230f639\" (UID: \"45abdf09-4f43-4d10-a0b0-ca8ad230f639\") " Nov 22 08:15:03 crc kubenswrapper[4929]: I1122 08:15:03.597029 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6k6m\" (UniqueName: \"kubernetes.io/projected/45abdf09-4f43-4d10-a0b0-ca8ad230f639-kube-api-access-g6k6m\") pod \"45abdf09-4f43-4d10-a0b0-ca8ad230f639\" (UID: \"45abdf09-4f43-4d10-a0b0-ca8ad230f639\") " Nov 22 08:15:03 crc kubenswrapper[4929]: I1122 08:15:03.598497 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45abdf09-4f43-4d10-a0b0-ca8ad230f639-config-volume" (OuterVolumeSpecName: "config-volume") pod "45abdf09-4f43-4d10-a0b0-ca8ad230f639" (UID: "45abdf09-4f43-4d10-a0b0-ca8ad230f639"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 08:15:03 crc kubenswrapper[4929]: I1122 08:15:03.604272 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/45abdf09-4f43-4d10-a0b0-ca8ad230f639-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "45abdf09-4f43-4d10-a0b0-ca8ad230f639" (UID: "45abdf09-4f43-4d10-a0b0-ca8ad230f639"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 08:15:03 crc kubenswrapper[4929]: I1122 08:15:03.605981 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45abdf09-4f43-4d10-a0b0-ca8ad230f639-kube-api-access-g6k6m" (OuterVolumeSpecName: "kube-api-access-g6k6m") pod "45abdf09-4f43-4d10-a0b0-ca8ad230f639" (UID: "45abdf09-4f43-4d10-a0b0-ca8ad230f639"). InnerVolumeSpecName "kube-api-access-g6k6m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 08:15:03 crc kubenswrapper[4929]: I1122 08:15:03.699739 4929 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/45abdf09-4f43-4d10-a0b0-ca8ad230f639-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 08:15:03 crc kubenswrapper[4929]: I1122 08:15:03.699789 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6k6m\" (UniqueName: \"kubernetes.io/projected/45abdf09-4f43-4d10-a0b0-ca8ad230f639-kube-api-access-g6k6m\") on node \"crc\" DevicePath \"\"" Nov 22 08:15:03 crc kubenswrapper[4929]: I1122 08:15:03.699803 4929 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/45abdf09-4f43-4d10-a0b0-ca8ad230f639-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 08:15:04 crc kubenswrapper[4929]: I1122 08:15:04.117747 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396655-vqrbg" event={"ID":"45abdf09-4f43-4d10-a0b0-ca8ad230f639","Type":"ContainerDied","Data":"c70540bbc525d04d7c8a75c23163761a21cc20227a5f76c9d8d273524fb4a9f2"} Nov 22 08:15:04 crc kubenswrapper[4929]: I1122 08:15:04.118039 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c70540bbc525d04d7c8a75c23163761a21cc20227a5f76c9d8d273524fb4a9f2" Nov 22 08:15:04 crc kubenswrapper[4929]: I1122 08:15:04.117786 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396655-vqrbg" Nov 22 08:15:04 crc kubenswrapper[4929]: I1122 08:15:04.517170 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw"] Nov 22 08:15:04 crc kubenswrapper[4929]: I1122 08:15:04.527097 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396610-lkpxw"] Nov 22 08:15:05 crc kubenswrapper[4929]: I1122 08:15:05.968178 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33bf4915-25d4-40b8-9082-9dff385e1f46" path="/var/lib/kubelet/pods/33bf4915-25d4-40b8-9082-9dff385e1f46/volumes" Nov 22 08:15:13 crc kubenswrapper[4929]: I1122 08:15:13.867870 4929 scope.go:117] "RemoveContainer" containerID="f63dab2d35f6105de938ebe7833e14068a260222ba52e5bb5ae3a7b92d6542ab" Nov 22 08:15:18 crc kubenswrapper[4929]: I1122 08:15:18.594978 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 08:15:18 crc kubenswrapper[4929]: I1122 08:15:18.595623 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 08:15:18 crc kubenswrapper[4929]: I1122 08:15:18.595674 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 08:15:18 crc kubenswrapper[4929]: I1122 08:15:18.596601 4929 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f"} pod="openshift-machine-config-operator/machine-config-daemon-dssfx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 08:15:18 crc kubenswrapper[4929]: I1122 08:15:18.596661 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" containerID="cri-o://08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" gracePeriod=600 Nov 22 08:15:19 crc kubenswrapper[4929]: I1122 08:15:19.265193 4929 generic.go:334] "Generic (PLEG): container finished" podID="470531cb-120c-48d9-80e1-adf074cf3055" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" exitCode=0 Nov 22 08:15:19 crc kubenswrapper[4929]: I1122 08:15:19.265256 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerDied","Data":"08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f"} Nov 22 08:15:19 crc kubenswrapper[4929]: I1122 08:15:19.265624 4929 scope.go:117] "RemoveContainer" containerID="ef7c385702fe51dea117fafc786255623c312f2fcfd89279145635a875374763" Nov 22 08:15:19 crc kubenswrapper[4929]: E1122 08:15:19.462113 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:15:20 crc kubenswrapper[4929]: I1122 08:15:20.279569 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:15:20 crc kubenswrapper[4929]: E1122 08:15:20.279864 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:15:29 crc kubenswrapper[4929]: I1122 08:15:29.332552 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dpsvs"] Nov 22 08:15:29 crc kubenswrapper[4929]: E1122 08:15:29.334318 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45abdf09-4f43-4d10-a0b0-ca8ad230f639" containerName="collect-profiles" Nov 22 08:15:29 crc kubenswrapper[4929]: I1122 08:15:29.334339 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="45abdf09-4f43-4d10-a0b0-ca8ad230f639" containerName="collect-profiles" Nov 22 08:15:29 crc kubenswrapper[4929]: I1122 08:15:29.334599 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="45abdf09-4f43-4d10-a0b0-ca8ad230f639" containerName="collect-profiles" Nov 22 08:15:29 crc kubenswrapper[4929]: I1122 08:15:29.337541 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dpsvs" Nov 22 08:15:29 crc kubenswrapper[4929]: I1122 08:15:29.347317 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dpsvs"] Nov 22 08:15:29 crc kubenswrapper[4929]: I1122 08:15:29.474788 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9bnw\" (UniqueName: \"kubernetes.io/projected/67a35c1d-3dcd-4c16-b2c3-bd0c29424239-kube-api-access-r9bnw\") pod \"certified-operators-dpsvs\" (UID: \"67a35c1d-3dcd-4c16-b2c3-bd0c29424239\") " pod="openshift-marketplace/certified-operators-dpsvs" Nov 22 08:15:29 crc kubenswrapper[4929]: I1122 08:15:29.475072 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67a35c1d-3dcd-4c16-b2c3-bd0c29424239-catalog-content\") pod \"certified-operators-dpsvs\" (UID: \"67a35c1d-3dcd-4c16-b2c3-bd0c29424239\") " pod="openshift-marketplace/certified-operators-dpsvs" Nov 22 08:15:29 crc kubenswrapper[4929]: I1122 08:15:29.475666 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67a35c1d-3dcd-4c16-b2c3-bd0c29424239-utilities\") pod \"certified-operators-dpsvs\" (UID: \"67a35c1d-3dcd-4c16-b2c3-bd0c29424239\") " pod="openshift-marketplace/certified-operators-dpsvs" Nov 22 08:15:29 crc kubenswrapper[4929]: I1122 08:15:29.578312 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67a35c1d-3dcd-4c16-b2c3-bd0c29424239-utilities\") pod \"certified-operators-dpsvs\" (UID: \"67a35c1d-3dcd-4c16-b2c3-bd0c29424239\") " pod="openshift-marketplace/certified-operators-dpsvs" Nov 22 08:15:29 crc kubenswrapper[4929]: I1122 08:15:29.578502 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9bnw\" (UniqueName: \"kubernetes.io/projected/67a35c1d-3dcd-4c16-b2c3-bd0c29424239-kube-api-access-r9bnw\") pod \"certified-operators-dpsvs\" (UID: \"67a35c1d-3dcd-4c16-b2c3-bd0c29424239\") " pod="openshift-marketplace/certified-operators-dpsvs" Nov 22 08:15:29 crc kubenswrapper[4929]: I1122 08:15:29.578636 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67a35c1d-3dcd-4c16-b2c3-bd0c29424239-catalog-content\") pod \"certified-operators-dpsvs\" (UID: \"67a35c1d-3dcd-4c16-b2c3-bd0c29424239\") " pod="openshift-marketplace/certified-operators-dpsvs" Nov 22 08:15:29 crc kubenswrapper[4929]: I1122 08:15:29.578854 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67a35c1d-3dcd-4c16-b2c3-bd0c29424239-utilities\") pod \"certified-operators-dpsvs\" (UID: \"67a35c1d-3dcd-4c16-b2c3-bd0c29424239\") " pod="openshift-marketplace/certified-operators-dpsvs" Nov 22 08:15:29 crc kubenswrapper[4929]: I1122 08:15:29.579418 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67a35c1d-3dcd-4c16-b2c3-bd0c29424239-catalog-content\") pod \"certified-operators-dpsvs\" (UID: \"67a35c1d-3dcd-4c16-b2c3-bd0c29424239\") " pod="openshift-marketplace/certified-operators-dpsvs" Nov 22 08:15:29 crc kubenswrapper[4929]: I1122 08:15:29.602507 4929 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-r9bnw\" (UniqueName: \"kubernetes.io/projected/67a35c1d-3dcd-4c16-b2c3-bd0c29424239-kube-api-access-r9bnw\") pod \"certified-operators-dpsvs\" (UID: \"67a35c1d-3dcd-4c16-b2c3-bd0c29424239\") " pod="openshift-marketplace/certified-operators-dpsvs" Nov 22 08:15:29 crc kubenswrapper[4929]: I1122 08:15:29.713846 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dpsvs" Nov 22 08:15:30 crc kubenswrapper[4929]: I1122 08:15:30.209436 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dpsvs"] Nov 22 08:15:30 crc kubenswrapper[4929]: I1122 08:15:30.375512 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpsvs" event={"ID":"67a35c1d-3dcd-4c16-b2c3-bd0c29424239","Type":"ContainerStarted","Data":"4b133f0c1f0ece41fd2c2a66f287a0480d271ff9810a1cdacdb9bd6570a17d13"} Nov 22 08:15:31 crc kubenswrapper[4929]: I1122 08:15:31.385845 4929 generic.go:334] "Generic (PLEG): container finished" podID="67a35c1d-3dcd-4c16-b2c3-bd0c29424239" containerID="9dc90b8951211fd03f43dfecf1d61beb2c35ed21b4fbb9f3b175394f61ff6664" exitCode=0 Nov 22 08:15:31 crc kubenswrapper[4929]: I1122 08:15:31.385919 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpsvs" event={"ID":"67a35c1d-3dcd-4c16-b2c3-bd0c29424239","Type":"ContainerDied","Data":"9dc90b8951211fd03f43dfecf1d61beb2c35ed21b4fbb9f3b175394f61ff6664"} Nov 22 08:15:34 crc kubenswrapper[4929]: I1122 08:15:34.416031 4929 generic.go:334] "Generic (PLEG): container finished" podID="67a35c1d-3dcd-4c16-b2c3-bd0c29424239" containerID="d8fe2200390e8c7a5e8756064b0cf855fff5c6a816731d014a4ee193ec593cba" exitCode=0 Nov 22 08:15:34 crc kubenswrapper[4929]: I1122 08:15:34.416117 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpsvs" event={"ID":"67a35c1d-3dcd-4c16-b2c3-bd0c29424239","Type":"ContainerDied","Data":"d8fe2200390e8c7a5e8756064b0cf855fff5c6a816731d014a4ee193ec593cba"} Nov 22 08:15:34 crc kubenswrapper[4929]: I1122 08:15:34.947978 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:15:34 crc kubenswrapper[4929]: E1122 08:15:34.948415 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:15:42 crc kubenswrapper[4929]: I1122 08:15:42.497058 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpsvs" event={"ID":"67a35c1d-3dcd-4c16-b2c3-bd0c29424239","Type":"ContainerStarted","Data":"02050a417536705e2bb3cc4ed0636312f6ef4447f3a689d2f48b33002fc94fbe"} Nov 22 08:15:42 crc kubenswrapper[4929]: I1122 08:15:42.527902 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dpsvs" podStartSLOduration=3.621896959 podStartE2EDuration="13.527883325s" podCreationTimestamp="2025-11-22 08:15:29 +0000 UTC" firstStartedPulling="2025-11-22 08:15:31.389686942 +0000 UTC m=+3868.499140955" 
lastFinishedPulling="2025-11-22 08:15:41.295673308 +0000 UTC m=+3878.405127321" observedRunningTime="2025-11-22 08:15:42.524154584 +0000 UTC m=+3879.633608627" watchObservedRunningTime="2025-11-22 08:15:42.527883325 +0000 UTC m=+3879.637337348" Nov 22 08:15:49 crc kubenswrapper[4929]: I1122 08:15:49.714799 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dpsvs" Nov 22 08:15:49 crc kubenswrapper[4929]: I1122 08:15:49.715410 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dpsvs" Nov 22 08:15:49 crc kubenswrapper[4929]: I1122 08:15:49.779159 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dpsvs" Nov 22 08:15:49 crc kubenswrapper[4929]: I1122 08:15:49.955457 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:15:49 crc kubenswrapper[4929]: E1122 08:15:49.955697 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:15:51 crc kubenswrapper[4929]: I1122 08:15:51.119052 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dpsvs" Nov 22 08:15:51 crc kubenswrapper[4929]: I1122 08:15:51.173640 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dpsvs"] Nov 22 08:15:52 crc kubenswrapper[4929]: I1122 08:15:52.589943 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dpsvs" podUID="67a35c1d-3dcd-4c16-b2c3-bd0c29424239" containerName="registry-server" containerID="cri-o://02050a417536705e2bb3cc4ed0636312f6ef4447f3a689d2f48b33002fc94fbe" gracePeriod=2 Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.301258 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dpsvs" Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.351514 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67a35c1d-3dcd-4c16-b2c3-bd0c29424239-catalog-content\") pod \"67a35c1d-3dcd-4c16-b2c3-bd0c29424239\" (UID: \"67a35c1d-3dcd-4c16-b2c3-bd0c29424239\") " Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.351742 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r9bnw\" (UniqueName: \"kubernetes.io/projected/67a35c1d-3dcd-4c16-b2c3-bd0c29424239-kube-api-access-r9bnw\") pod \"67a35c1d-3dcd-4c16-b2c3-bd0c29424239\" (UID: \"67a35c1d-3dcd-4c16-b2c3-bd0c29424239\") " Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.351942 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67a35c1d-3dcd-4c16-b2c3-bd0c29424239-utilities\") pod \"67a35c1d-3dcd-4c16-b2c3-bd0c29424239\" (UID: \"67a35c1d-3dcd-4c16-b2c3-bd0c29424239\") " Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.352851 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67a35c1d-3dcd-4c16-b2c3-bd0c29424239-utilities" (OuterVolumeSpecName: "utilities") pod "67a35c1d-3dcd-4c16-b2c3-bd0c29424239" (UID: "67a35c1d-3dcd-4c16-b2c3-bd0c29424239"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.354043 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67a35c1d-3dcd-4c16-b2c3-bd0c29424239-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.360063 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67a35c1d-3dcd-4c16-b2c3-bd0c29424239-kube-api-access-r9bnw" (OuterVolumeSpecName: "kube-api-access-r9bnw") pod "67a35c1d-3dcd-4c16-b2c3-bd0c29424239" (UID: "67a35c1d-3dcd-4c16-b2c3-bd0c29424239"). InnerVolumeSpecName "kube-api-access-r9bnw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.408185 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67a35c1d-3dcd-4c16-b2c3-bd0c29424239-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "67a35c1d-3dcd-4c16-b2c3-bd0c29424239" (UID: "67a35c1d-3dcd-4c16-b2c3-bd0c29424239"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.455669 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r9bnw\" (UniqueName: \"kubernetes.io/projected/67a35c1d-3dcd-4c16-b2c3-bd0c29424239-kube-api-access-r9bnw\") on node \"crc\" DevicePath \"\"" Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.455705 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67a35c1d-3dcd-4c16-b2c3-bd0c29424239-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.628956 4929 generic.go:334] "Generic (PLEG): container finished" podID="67a35c1d-3dcd-4c16-b2c3-bd0c29424239" containerID="02050a417536705e2bb3cc4ed0636312f6ef4447f3a689d2f48b33002fc94fbe" exitCode=0 Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.629004 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpsvs" event={"ID":"67a35c1d-3dcd-4c16-b2c3-bd0c29424239","Type":"ContainerDied","Data":"02050a417536705e2bb3cc4ed0636312f6ef4447f3a689d2f48b33002fc94fbe"} Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.629038 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dpsvs" event={"ID":"67a35c1d-3dcd-4c16-b2c3-bd0c29424239","Type":"ContainerDied","Data":"4b133f0c1f0ece41fd2c2a66f287a0480d271ff9810a1cdacdb9bd6570a17d13"} Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.629059 4929 scope.go:117] "RemoveContainer" containerID="02050a417536705e2bb3cc4ed0636312f6ef4447f3a689d2f48b33002fc94fbe" Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.629059 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dpsvs" Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.650247 4929 scope.go:117] "RemoveContainer" containerID="d8fe2200390e8c7a5e8756064b0cf855fff5c6a816731d014a4ee193ec593cba" Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.675372 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dpsvs"] Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.686359 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dpsvs"] Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.688542 4929 scope.go:117] "RemoveContainer" containerID="9dc90b8951211fd03f43dfecf1d61beb2c35ed21b4fbb9f3b175394f61ff6664" Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.732600 4929 scope.go:117] "RemoveContainer" containerID="02050a417536705e2bb3cc4ed0636312f6ef4447f3a689d2f48b33002fc94fbe" Nov 22 08:15:54 crc kubenswrapper[4929]: E1122 08:15:54.735470 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02050a417536705e2bb3cc4ed0636312f6ef4447f3a689d2f48b33002fc94fbe\": container with ID starting with 02050a417536705e2bb3cc4ed0636312f6ef4447f3a689d2f48b33002fc94fbe not found: ID does not exist" containerID="02050a417536705e2bb3cc4ed0636312f6ef4447f3a689d2f48b33002fc94fbe" Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.735517 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02050a417536705e2bb3cc4ed0636312f6ef4447f3a689d2f48b33002fc94fbe"} err="failed to get container status \"02050a417536705e2bb3cc4ed0636312f6ef4447f3a689d2f48b33002fc94fbe\": rpc error: code = NotFound desc = could not find container \"02050a417536705e2bb3cc4ed0636312f6ef4447f3a689d2f48b33002fc94fbe\": container with ID starting with 02050a417536705e2bb3cc4ed0636312f6ef4447f3a689d2f48b33002fc94fbe not found: ID does not exist" Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.735548 4929 scope.go:117] "RemoveContainer" containerID="d8fe2200390e8c7a5e8756064b0cf855fff5c6a816731d014a4ee193ec593cba" Nov 22 08:15:54 crc kubenswrapper[4929]: E1122 08:15:54.736027 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8fe2200390e8c7a5e8756064b0cf855fff5c6a816731d014a4ee193ec593cba\": container with ID starting with d8fe2200390e8c7a5e8756064b0cf855fff5c6a816731d014a4ee193ec593cba not found: ID does not exist" containerID="d8fe2200390e8c7a5e8756064b0cf855fff5c6a816731d014a4ee193ec593cba" Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.736058 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8fe2200390e8c7a5e8756064b0cf855fff5c6a816731d014a4ee193ec593cba"} err="failed to get container status \"d8fe2200390e8c7a5e8756064b0cf855fff5c6a816731d014a4ee193ec593cba\": rpc error: code = NotFound desc = could not find container \"d8fe2200390e8c7a5e8756064b0cf855fff5c6a816731d014a4ee193ec593cba\": container with ID starting with d8fe2200390e8c7a5e8756064b0cf855fff5c6a816731d014a4ee193ec593cba not found: ID does not exist" Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.736072 4929 scope.go:117] "RemoveContainer" containerID="9dc90b8951211fd03f43dfecf1d61beb2c35ed21b4fbb9f3b175394f61ff6664" Nov 22 08:15:54 crc kubenswrapper[4929]: E1122 08:15:54.736436 4929 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"9dc90b8951211fd03f43dfecf1d61beb2c35ed21b4fbb9f3b175394f61ff6664\": container with ID starting with 9dc90b8951211fd03f43dfecf1d61beb2c35ed21b4fbb9f3b175394f61ff6664 not found: ID does not exist" containerID="9dc90b8951211fd03f43dfecf1d61beb2c35ed21b4fbb9f3b175394f61ff6664" Nov 22 08:15:54 crc kubenswrapper[4929]: I1122 08:15:54.736465 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9dc90b8951211fd03f43dfecf1d61beb2c35ed21b4fbb9f3b175394f61ff6664"} err="failed to get container status \"9dc90b8951211fd03f43dfecf1d61beb2c35ed21b4fbb9f3b175394f61ff6664\": rpc error: code = NotFound desc = could not find container \"9dc90b8951211fd03f43dfecf1d61beb2c35ed21b4fbb9f3b175394f61ff6664\": container with ID starting with 9dc90b8951211fd03f43dfecf1d61beb2c35ed21b4fbb9f3b175394f61ff6664 not found: ID does not exist" Nov 22 08:15:55 crc kubenswrapper[4929]: I1122 08:15:55.958995 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67a35c1d-3dcd-4c16-b2c3-bd0c29424239" path="/var/lib/kubelet/pods/67a35c1d-3dcd-4c16-b2c3-bd0c29424239/volumes" Nov 22 08:16:02 crc kubenswrapper[4929]: I1122 08:16:02.948487 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:16:02 crc kubenswrapper[4929]: E1122 08:16:02.949700 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:16:16 crc kubenswrapper[4929]: I1122 08:16:16.947907 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:16:16 crc kubenswrapper[4929]: E1122 08:16:16.949101 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:16:30 crc kubenswrapper[4929]: I1122 08:16:30.947476 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:16:30 crc kubenswrapper[4929]: E1122 08:16:30.948501 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:16:43 crc kubenswrapper[4929]: I1122 08:16:43.958009 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:16:43 crc kubenswrapper[4929]: E1122 08:16:43.959223 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:16:54 crc kubenswrapper[4929]: I1122 08:16:54.947100 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:16:54 crc kubenswrapper[4929]: E1122 08:16:54.947871 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:17:07 crc kubenswrapper[4929]: I1122 08:17:07.947167 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:17:07 crc kubenswrapper[4929]: E1122 08:17:07.947979 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:17:19 crc kubenswrapper[4929]: I1122 08:17:19.948478 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:17:19 crc kubenswrapper[4929]: E1122 08:17:19.949519 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:17:30 crc kubenswrapper[4929]: I1122 08:17:30.947835 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:17:30 crc kubenswrapper[4929]: E1122 08:17:30.950326 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:17:41 crc kubenswrapper[4929]: I1122 08:17:41.947685 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:17:41 crc kubenswrapper[4929]: E1122 08:17:41.950863 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:17:55 crc kubenswrapper[4929]: I1122 08:17:55.947592 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:17:55 crc kubenswrapper[4929]: E1122 08:17:55.948640 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:18:06 crc kubenswrapper[4929]: I1122 08:18:06.947536 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:18:06 crc kubenswrapper[4929]: E1122 08:18:06.948360 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:18:19 crc kubenswrapper[4929]: I1122 08:18:19.947476 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:18:19 crc kubenswrapper[4929]: E1122 08:18:19.948199 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:18:34 crc kubenswrapper[4929]: I1122 08:18:34.948259 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:18:34 crc kubenswrapper[4929]: E1122 08:18:34.949818 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:18:48 crc kubenswrapper[4929]: I1122 08:18:48.947416 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:18:48 crc kubenswrapper[4929]: E1122 08:18:48.948342 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" 
podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:19:01 crc kubenswrapper[4929]: I1122 08:19:01.947159 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:19:01 crc kubenswrapper[4929]: E1122 08:19:01.948087 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:19:15 crc kubenswrapper[4929]: I1122 08:19:15.947867 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:19:15 crc kubenswrapper[4929]: E1122 08:19:15.948918 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:19:29 crc kubenswrapper[4929]: I1122 08:19:29.948365 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:19:29 crc kubenswrapper[4929]: E1122 08:19:29.949383 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:19:42 crc kubenswrapper[4929]: I1122 08:19:42.946872 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:19:42 crc kubenswrapper[4929]: E1122 08:19:42.947623 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:19:57 crc kubenswrapper[4929]: I1122 08:19:57.947425 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:19:57 crc kubenswrapper[4929]: E1122 08:19:57.948308 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:20:12 crc kubenswrapper[4929]: I1122 08:20:12.947613 4929 scope.go:117] "RemoveContainer" 
containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:20:12 crc kubenswrapper[4929]: E1122 08:20:12.948394 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:20:23 crc kubenswrapper[4929]: I1122 08:20:23.954522 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:20:24 crc kubenswrapper[4929]: I1122 08:20:24.436416 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"043df286f9fe05344f4647284c184435999e8b795f9ba5320e307d411ae8301b"} Nov 22 08:22:48 crc kubenswrapper[4929]: I1122 08:22:48.594298 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 08:22:48 crc kubenswrapper[4929]: I1122 08:22:48.594935 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 08:22:57 crc kubenswrapper[4929]: I1122 08:22:57.621077 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-8zg7s"] Nov 22 08:22:57 crc kubenswrapper[4929]: E1122 08:22:57.622455 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67a35c1d-3dcd-4c16-b2c3-bd0c29424239" containerName="extract-content" Nov 22 08:22:57 crc kubenswrapper[4929]: I1122 08:22:57.622473 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="67a35c1d-3dcd-4c16-b2c3-bd0c29424239" containerName="extract-content" Nov 22 08:22:57 crc kubenswrapper[4929]: E1122 08:22:57.622490 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67a35c1d-3dcd-4c16-b2c3-bd0c29424239" containerName="extract-utilities" Nov 22 08:22:57 crc kubenswrapper[4929]: I1122 08:22:57.622502 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="67a35c1d-3dcd-4c16-b2c3-bd0c29424239" containerName="extract-utilities" Nov 22 08:22:57 crc kubenswrapper[4929]: E1122 08:22:57.622546 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67a35c1d-3dcd-4c16-b2c3-bd0c29424239" containerName="registry-server" Nov 22 08:22:57 crc kubenswrapper[4929]: I1122 08:22:57.622556 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="67a35c1d-3dcd-4c16-b2c3-bd0c29424239" containerName="registry-server" Nov 22 08:22:57 crc kubenswrapper[4929]: I1122 08:22:57.622843 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="67a35c1d-3dcd-4c16-b2c3-bd0c29424239" containerName="registry-server" Nov 22 08:22:57 crc kubenswrapper[4929]: I1122 08:22:57.627739 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8zg7s" Nov 22 08:22:57 crc kubenswrapper[4929]: I1122 08:22:57.651113 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8zg7s"] Nov 22 08:22:57 crc kubenswrapper[4929]: I1122 08:22:57.805172 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82ea276a-fbd5-4fea-a8a3-521ba112c7a1-catalog-content\") pod \"community-operators-8zg7s\" (UID: \"82ea276a-fbd5-4fea-a8a3-521ba112c7a1\") " pod="openshift-marketplace/community-operators-8zg7s" Nov 22 08:22:57 crc kubenswrapper[4929]: I1122 08:22:57.805271 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82ea276a-fbd5-4fea-a8a3-521ba112c7a1-utilities\") pod \"community-operators-8zg7s\" (UID: \"82ea276a-fbd5-4fea-a8a3-521ba112c7a1\") " pod="openshift-marketplace/community-operators-8zg7s" Nov 22 08:22:57 crc kubenswrapper[4929]: I1122 08:22:57.805361 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jk8jn\" (UniqueName: \"kubernetes.io/projected/82ea276a-fbd5-4fea-a8a3-521ba112c7a1-kube-api-access-jk8jn\") pod \"community-operators-8zg7s\" (UID: \"82ea276a-fbd5-4fea-a8a3-521ba112c7a1\") " pod="openshift-marketplace/community-operators-8zg7s" Nov 22 08:22:57 crc kubenswrapper[4929]: I1122 08:22:57.907100 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jk8jn\" (UniqueName: \"kubernetes.io/projected/82ea276a-fbd5-4fea-a8a3-521ba112c7a1-kube-api-access-jk8jn\") pod \"community-operators-8zg7s\" (UID: \"82ea276a-fbd5-4fea-a8a3-521ba112c7a1\") " pod="openshift-marketplace/community-operators-8zg7s" Nov 22 08:22:57 crc kubenswrapper[4929]: I1122 08:22:57.907302 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82ea276a-fbd5-4fea-a8a3-521ba112c7a1-catalog-content\") pod \"community-operators-8zg7s\" (UID: \"82ea276a-fbd5-4fea-a8a3-521ba112c7a1\") " pod="openshift-marketplace/community-operators-8zg7s" Nov 22 08:22:57 crc kubenswrapper[4929]: I1122 08:22:57.907340 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82ea276a-fbd5-4fea-a8a3-521ba112c7a1-utilities\") pod \"community-operators-8zg7s\" (UID: \"82ea276a-fbd5-4fea-a8a3-521ba112c7a1\") " pod="openshift-marketplace/community-operators-8zg7s" Nov 22 08:22:57 crc kubenswrapper[4929]: I1122 08:22:57.908005 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82ea276a-fbd5-4fea-a8a3-521ba112c7a1-catalog-content\") pod \"community-operators-8zg7s\" (UID: \"82ea276a-fbd5-4fea-a8a3-521ba112c7a1\") " pod="openshift-marketplace/community-operators-8zg7s" Nov 22 08:22:57 crc kubenswrapper[4929]: I1122 08:22:57.908028 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82ea276a-fbd5-4fea-a8a3-521ba112c7a1-utilities\") pod \"community-operators-8zg7s\" (UID: \"82ea276a-fbd5-4fea-a8a3-521ba112c7a1\") " pod="openshift-marketplace/community-operators-8zg7s" Nov 22 08:22:57 crc kubenswrapper[4929]: I1122 08:22:57.931747 4929 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-jk8jn\" (UniqueName: \"kubernetes.io/projected/82ea276a-fbd5-4fea-a8a3-521ba112c7a1-kube-api-access-jk8jn\") pod \"community-operators-8zg7s\" (UID: \"82ea276a-fbd5-4fea-a8a3-521ba112c7a1\") " pod="openshift-marketplace/community-operators-8zg7s" Nov 22 08:22:57 crc kubenswrapper[4929]: I1122 08:22:57.957964 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8zg7s" Nov 22 08:22:58 crc kubenswrapper[4929]: I1122 08:22:58.495841 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8zg7s"] Nov 22 08:22:58 crc kubenswrapper[4929]: I1122 08:22:58.884165 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8zg7s" event={"ID":"82ea276a-fbd5-4fea-a8a3-521ba112c7a1","Type":"ContainerStarted","Data":"07cda3fd3144a8ddc3b644c9aae26c63d54b51309e26402b587b055eb4388cb0"} Nov 22 08:22:59 crc kubenswrapper[4929]: I1122 08:22:59.895709 4929 generic.go:334] "Generic (PLEG): container finished" podID="82ea276a-fbd5-4fea-a8a3-521ba112c7a1" containerID="a12ee586bfd9145b5550a5ba7be33d17e9d86c9915899ef2592e05bc9fb5c70e" exitCode=0 Nov 22 08:22:59 crc kubenswrapper[4929]: I1122 08:22:59.895792 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8zg7s" event={"ID":"82ea276a-fbd5-4fea-a8a3-521ba112c7a1","Type":"ContainerDied","Data":"a12ee586bfd9145b5550a5ba7be33d17e9d86c9915899ef2592e05bc9fb5c70e"} Nov 22 08:22:59 crc kubenswrapper[4929]: I1122 08:22:59.899469 4929 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 08:23:01 crc kubenswrapper[4929]: I1122 08:23:01.914414 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8zg7s" event={"ID":"82ea276a-fbd5-4fea-a8a3-521ba112c7a1","Type":"ContainerStarted","Data":"14bfcc68b44f588f29af66f3b14a65cdfee106b2d88e6969ff2944f2fe153d11"} Nov 22 08:23:02 crc kubenswrapper[4929]: I1122 08:23:02.002538 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bbklg"] Nov 22 08:23:02 crc kubenswrapper[4929]: I1122 08:23:02.004818 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bbklg" Nov 22 08:23:02 crc kubenswrapper[4929]: I1122 08:23:02.011028 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bbklg"] Nov 22 08:23:02 crc kubenswrapper[4929]: I1122 08:23:02.130845 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bc0d329-bdc0-48fb-b847-e42a068e6c6e-catalog-content\") pod \"redhat-operators-bbklg\" (UID: \"6bc0d329-bdc0-48fb-b847-e42a068e6c6e\") " pod="openshift-marketplace/redhat-operators-bbklg" Nov 22 08:23:02 crc kubenswrapper[4929]: I1122 08:23:02.130935 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cq8kk\" (UniqueName: \"kubernetes.io/projected/6bc0d329-bdc0-48fb-b847-e42a068e6c6e-kube-api-access-cq8kk\") pod \"redhat-operators-bbklg\" (UID: \"6bc0d329-bdc0-48fb-b847-e42a068e6c6e\") " pod="openshift-marketplace/redhat-operators-bbklg" Nov 22 08:23:02 crc kubenswrapper[4929]: I1122 08:23:02.131004 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bc0d329-bdc0-48fb-b847-e42a068e6c6e-utilities\") pod \"redhat-operators-bbklg\" (UID: \"6bc0d329-bdc0-48fb-b847-e42a068e6c6e\") " pod="openshift-marketplace/redhat-operators-bbklg" Nov 22 08:23:02 crc kubenswrapper[4929]: I1122 08:23:02.233293 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bc0d329-bdc0-48fb-b847-e42a068e6c6e-catalog-content\") pod \"redhat-operators-bbklg\" (UID: \"6bc0d329-bdc0-48fb-b847-e42a068e6c6e\") " pod="openshift-marketplace/redhat-operators-bbklg" Nov 22 08:23:02 crc kubenswrapper[4929]: I1122 08:23:02.233377 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cq8kk\" (UniqueName: \"kubernetes.io/projected/6bc0d329-bdc0-48fb-b847-e42a068e6c6e-kube-api-access-cq8kk\") pod \"redhat-operators-bbklg\" (UID: \"6bc0d329-bdc0-48fb-b847-e42a068e6c6e\") " pod="openshift-marketplace/redhat-operators-bbklg" Nov 22 08:23:02 crc kubenswrapper[4929]: I1122 08:23:02.233432 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bc0d329-bdc0-48fb-b847-e42a068e6c6e-utilities\") pod \"redhat-operators-bbklg\" (UID: \"6bc0d329-bdc0-48fb-b847-e42a068e6c6e\") " pod="openshift-marketplace/redhat-operators-bbklg" Nov 22 08:23:02 crc kubenswrapper[4929]: I1122 08:23:02.233893 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bc0d329-bdc0-48fb-b847-e42a068e6c6e-catalog-content\") pod \"redhat-operators-bbklg\" (UID: \"6bc0d329-bdc0-48fb-b847-e42a068e6c6e\") " pod="openshift-marketplace/redhat-operators-bbklg" Nov 22 08:23:02 crc kubenswrapper[4929]: I1122 08:23:02.233936 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bc0d329-bdc0-48fb-b847-e42a068e6c6e-utilities\") pod \"redhat-operators-bbklg\" (UID: \"6bc0d329-bdc0-48fb-b847-e42a068e6c6e\") " pod="openshift-marketplace/redhat-operators-bbklg" Nov 22 08:23:02 crc kubenswrapper[4929]: I1122 08:23:02.671838 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-cq8kk\" (UniqueName: \"kubernetes.io/projected/6bc0d329-bdc0-48fb-b847-e42a068e6c6e-kube-api-access-cq8kk\") pod \"redhat-operators-bbklg\" (UID: \"6bc0d329-bdc0-48fb-b847-e42a068e6c6e\") " pod="openshift-marketplace/redhat-operators-bbklg" Nov 22 08:23:02 crc kubenswrapper[4929]: I1122 08:23:02.927264 4929 generic.go:334] "Generic (PLEG): container finished" podID="82ea276a-fbd5-4fea-a8a3-521ba112c7a1" containerID="14bfcc68b44f588f29af66f3b14a65cdfee106b2d88e6969ff2944f2fe153d11" exitCode=0 Nov 22 08:23:02 crc kubenswrapper[4929]: I1122 08:23:02.927377 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8zg7s" event={"ID":"82ea276a-fbd5-4fea-a8a3-521ba112c7a1","Type":"ContainerDied","Data":"14bfcc68b44f588f29af66f3b14a65cdfee106b2d88e6969ff2944f2fe153d11"} Nov 22 08:23:02 crc kubenswrapper[4929]: I1122 08:23:02.928512 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bbklg" Nov 22 08:23:03 crc kubenswrapper[4929]: I1122 08:23:03.478098 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bbklg"] Nov 22 08:23:03 crc kubenswrapper[4929]: I1122 08:23:03.940178 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bbklg" event={"ID":"6bc0d329-bdc0-48fb-b847-e42a068e6c6e","Type":"ContainerStarted","Data":"4f01b59ac2ae0e33669858f9fa9afe203c81bd5d6afabcb58c710ff4424a89c7"} Nov 22 08:23:03 crc kubenswrapper[4929]: I1122 08:23:03.943420 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8zg7s" event={"ID":"82ea276a-fbd5-4fea-a8a3-521ba112c7a1","Type":"ContainerStarted","Data":"db101b5789cff3edff355a08c1b5633d4cbe31ac5e38b6388dca47b16b4d5270"} Nov 22 08:23:04 crc kubenswrapper[4929]: I1122 08:23:04.953523 4929 generic.go:334] "Generic (PLEG): container finished" podID="6bc0d329-bdc0-48fb-b847-e42a068e6c6e" containerID="ab2a7e68de963ff25a6a822282641d98519975bae6dd649e8faa6107a106b45e" exitCode=0 Nov 22 08:23:04 crc kubenswrapper[4929]: I1122 08:23:04.953602 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bbklg" event={"ID":"6bc0d329-bdc0-48fb-b847-e42a068e6c6e","Type":"ContainerDied","Data":"ab2a7e68de963ff25a6a822282641d98519975bae6dd649e8faa6107a106b45e"} Nov 22 08:23:04 crc kubenswrapper[4929]: I1122 08:23:04.995659 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-8zg7s" podStartSLOduration=4.587936001 podStartE2EDuration="7.995634405s" podCreationTimestamp="2025-11-22 08:22:57 +0000 UTC" firstStartedPulling="2025-11-22 08:22:59.899116941 +0000 UTC m=+4317.008570954" lastFinishedPulling="2025-11-22 08:23:03.306815345 +0000 UTC m=+4320.416269358" observedRunningTime="2025-11-22 08:23:04.989432701 +0000 UTC m=+4322.098886734" watchObservedRunningTime="2025-11-22 08:23:04.995634405 +0000 UTC m=+4322.105088418" Nov 22 08:23:05 crc kubenswrapper[4929]: I1122 08:23:05.963729 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bbklg" event={"ID":"6bc0d329-bdc0-48fb-b847-e42a068e6c6e","Type":"ContainerStarted","Data":"00aa56c54a4dc83b71c4dffce9c1a3ba13795713da3358da410f603a8485c264"} Nov 22 08:23:06 crc kubenswrapper[4929]: I1122 08:23:06.196027 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-66bzw"] Nov 22 
08:23:06 crc kubenswrapper[4929]: I1122 08:23:06.198805 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-66bzw" Nov 22 08:23:06 crc kubenswrapper[4929]: I1122 08:23:06.212281 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d40dd099-35ca-4180-a452-d1be8727c784-catalog-content\") pod \"redhat-marketplace-66bzw\" (UID: \"d40dd099-35ca-4180-a452-d1be8727c784\") " pod="openshift-marketplace/redhat-marketplace-66bzw" Nov 22 08:23:06 crc kubenswrapper[4929]: I1122 08:23:06.212424 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhv88\" (UniqueName: \"kubernetes.io/projected/d40dd099-35ca-4180-a452-d1be8727c784-kube-api-access-fhv88\") pod \"redhat-marketplace-66bzw\" (UID: \"d40dd099-35ca-4180-a452-d1be8727c784\") " pod="openshift-marketplace/redhat-marketplace-66bzw" Nov 22 08:23:06 crc kubenswrapper[4929]: I1122 08:23:06.212561 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d40dd099-35ca-4180-a452-d1be8727c784-utilities\") pod \"redhat-marketplace-66bzw\" (UID: \"d40dd099-35ca-4180-a452-d1be8727c784\") " pod="openshift-marketplace/redhat-marketplace-66bzw" Nov 22 08:23:06 crc kubenswrapper[4929]: I1122 08:23:06.224323 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-66bzw"] Nov 22 08:23:06 crc kubenswrapper[4929]: I1122 08:23:06.314514 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d40dd099-35ca-4180-a452-d1be8727c784-catalog-content\") pod \"redhat-marketplace-66bzw\" (UID: \"d40dd099-35ca-4180-a452-d1be8727c784\") " pod="openshift-marketplace/redhat-marketplace-66bzw" Nov 22 08:23:06 crc kubenswrapper[4929]: I1122 08:23:06.314588 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhv88\" (UniqueName: \"kubernetes.io/projected/d40dd099-35ca-4180-a452-d1be8727c784-kube-api-access-fhv88\") pod \"redhat-marketplace-66bzw\" (UID: \"d40dd099-35ca-4180-a452-d1be8727c784\") " pod="openshift-marketplace/redhat-marketplace-66bzw" Nov 22 08:23:06 crc kubenswrapper[4929]: I1122 08:23:06.314685 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d40dd099-35ca-4180-a452-d1be8727c784-utilities\") pod \"redhat-marketplace-66bzw\" (UID: \"d40dd099-35ca-4180-a452-d1be8727c784\") " pod="openshift-marketplace/redhat-marketplace-66bzw" Nov 22 08:23:06 crc kubenswrapper[4929]: I1122 08:23:06.314986 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d40dd099-35ca-4180-a452-d1be8727c784-catalog-content\") pod \"redhat-marketplace-66bzw\" (UID: \"d40dd099-35ca-4180-a452-d1be8727c784\") " pod="openshift-marketplace/redhat-marketplace-66bzw" Nov 22 08:23:06 crc kubenswrapper[4929]: I1122 08:23:06.315158 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d40dd099-35ca-4180-a452-d1be8727c784-utilities\") pod \"redhat-marketplace-66bzw\" (UID: \"d40dd099-35ca-4180-a452-d1be8727c784\") " pod="openshift-marketplace/redhat-marketplace-66bzw" Nov 22 
08:23:06 crc kubenswrapper[4929]: I1122 08:23:06.360224 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhv88\" (UniqueName: \"kubernetes.io/projected/d40dd099-35ca-4180-a452-d1be8727c784-kube-api-access-fhv88\") pod \"redhat-marketplace-66bzw\" (UID: \"d40dd099-35ca-4180-a452-d1be8727c784\") " pod="openshift-marketplace/redhat-marketplace-66bzw" Nov 22 08:23:06 crc kubenswrapper[4929]: I1122 08:23:06.532082 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-66bzw" Nov 22 08:23:06 crc kubenswrapper[4929]: I1122 08:23:06.989892 4929 generic.go:334] "Generic (PLEG): container finished" podID="6bc0d329-bdc0-48fb-b847-e42a068e6c6e" containerID="00aa56c54a4dc83b71c4dffce9c1a3ba13795713da3358da410f603a8485c264" exitCode=0 Nov 22 08:23:06 crc kubenswrapper[4929]: I1122 08:23:06.990038 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bbklg" event={"ID":"6bc0d329-bdc0-48fb-b847-e42a068e6c6e","Type":"ContainerDied","Data":"00aa56c54a4dc83b71c4dffce9c1a3ba13795713da3358da410f603a8485c264"} Nov 22 08:23:07 crc kubenswrapper[4929]: I1122 08:23:07.177678 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-66bzw"] Nov 22 08:23:07 crc kubenswrapper[4929]: W1122 08:23:07.182502 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd40dd099_35ca_4180_a452_d1be8727c784.slice/crio-f704d16da04c5654ba99f04eb95f6b8b41701e7248caa3925743b3c347597937 WatchSource:0}: Error finding container f704d16da04c5654ba99f04eb95f6b8b41701e7248caa3925743b3c347597937: Status 404 returned error can't find the container with id f704d16da04c5654ba99f04eb95f6b8b41701e7248caa3925743b3c347597937 Nov 22 08:23:07 crc kubenswrapper[4929]: I1122 08:23:07.997470 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8zg7s" Nov 22 08:23:07 crc kubenswrapper[4929]: I1122 08:23:07.997556 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8zg7s" Nov 22 08:23:08 crc kubenswrapper[4929]: I1122 08:23:08.002405 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-66bzw" event={"ID":"d40dd099-35ca-4180-a452-d1be8727c784","Type":"ContainerStarted","Data":"103c342178726d84489e38b0ed4b74b7607d592ded3bed629ee5925ea3f1943b"} Nov 22 08:23:08 crc kubenswrapper[4929]: I1122 08:23:08.002483 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-66bzw" event={"ID":"d40dd099-35ca-4180-a452-d1be8727c784","Type":"ContainerStarted","Data":"f704d16da04c5654ba99f04eb95f6b8b41701e7248caa3925743b3c347597937"} Nov 22 08:23:08 crc kubenswrapper[4929]: I1122 08:23:08.008266 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8zg7s" Nov 22 08:23:09 crc kubenswrapper[4929]: I1122 08:23:09.085110 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8zg7s" Nov 22 08:23:12 crc kubenswrapper[4929]: I1122 08:23:12.038775 4929 generic.go:334] "Generic (PLEG): container finished" podID="d40dd099-35ca-4180-a452-d1be8727c784" containerID="103c342178726d84489e38b0ed4b74b7607d592ded3bed629ee5925ea3f1943b" exitCode=0 Nov 22 08:23:12 crc 
kubenswrapper[4929]: I1122 08:23:12.038895 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-66bzw" event={"ID":"d40dd099-35ca-4180-a452-d1be8727c784","Type":"ContainerDied","Data":"103c342178726d84489e38b0ed4b74b7607d592ded3bed629ee5925ea3f1943b"} Nov 22 08:23:12 crc kubenswrapper[4929]: I1122 08:23:12.787772 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8zg7s"] Nov 22 08:23:12 crc kubenswrapper[4929]: I1122 08:23:12.788024 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-8zg7s" podUID="82ea276a-fbd5-4fea-a8a3-521ba112c7a1" containerName="registry-server" containerID="cri-o://db101b5789cff3edff355a08c1b5633d4cbe31ac5e38b6388dca47b16b4d5270" gracePeriod=2 Nov 22 08:23:13 crc kubenswrapper[4929]: I1122 08:23:13.055585 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bbklg" event={"ID":"6bc0d329-bdc0-48fb-b847-e42a068e6c6e","Type":"ContainerStarted","Data":"58670cd326b103f250ce6e34e9af3f19dc95fac2ba8d494c9595be450223d3a0"} Nov 22 08:23:13 crc kubenswrapper[4929]: I1122 08:23:13.058937 4929 generic.go:334] "Generic (PLEG): container finished" podID="82ea276a-fbd5-4fea-a8a3-521ba112c7a1" containerID="db101b5789cff3edff355a08c1b5633d4cbe31ac5e38b6388dca47b16b4d5270" exitCode=0 Nov 22 08:23:13 crc kubenswrapper[4929]: I1122 08:23:13.058979 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8zg7s" event={"ID":"82ea276a-fbd5-4fea-a8a3-521ba112c7a1","Type":"ContainerDied","Data":"db101b5789cff3edff355a08c1b5633d4cbe31ac5e38b6388dca47b16b4d5270"} Nov 22 08:23:13 crc kubenswrapper[4929]: I1122 08:23:13.084777 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bbklg" podStartSLOduration=6.572877774 podStartE2EDuration="12.084755945s" podCreationTimestamp="2025-11-22 08:23:01 +0000 UTC" firstStartedPulling="2025-11-22 08:23:04.956453983 +0000 UTC m=+4322.065907996" lastFinishedPulling="2025-11-22 08:23:10.468332154 +0000 UTC m=+4327.577786167" observedRunningTime="2025-11-22 08:23:13.073343632 +0000 UTC m=+4330.182797645" watchObservedRunningTime="2025-11-22 08:23:13.084755945 +0000 UTC m=+4330.194209958" Nov 22 08:23:13 crc kubenswrapper[4929]: I1122 08:23:13.990637 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8zg7s" Nov 22 08:23:14 crc kubenswrapper[4929]: I1122 08:23:14.071264 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8zg7s" Nov 22 08:23:14 crc kubenswrapper[4929]: I1122 08:23:14.071199 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8zg7s" event={"ID":"82ea276a-fbd5-4fea-a8a3-521ba112c7a1","Type":"ContainerDied","Data":"07cda3fd3144a8ddc3b644c9aae26c63d54b51309e26402b587b055eb4388cb0"} Nov 22 08:23:14 crc kubenswrapper[4929]: I1122 08:23:14.072148 4929 scope.go:117] "RemoveContainer" containerID="db101b5789cff3edff355a08c1b5633d4cbe31ac5e38b6388dca47b16b4d5270" Nov 22 08:23:14 crc kubenswrapper[4929]: I1122 08:23:14.107301 4929 scope.go:117] "RemoveContainer" containerID="14bfcc68b44f588f29af66f3b14a65cdfee106b2d88e6969ff2944f2fe153d11" Nov 22 08:23:14 crc kubenswrapper[4929]: I1122 08:23:14.127802 4929 scope.go:117] "RemoveContainer" containerID="a12ee586bfd9145b5550a5ba7be33d17e9d86c9915899ef2592e05bc9fb5c70e" Nov 22 08:23:14 crc kubenswrapper[4929]: I1122 08:23:14.187194 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82ea276a-fbd5-4fea-a8a3-521ba112c7a1-utilities\") pod \"82ea276a-fbd5-4fea-a8a3-521ba112c7a1\" (UID: \"82ea276a-fbd5-4fea-a8a3-521ba112c7a1\") " Nov 22 08:23:14 crc kubenswrapper[4929]: I1122 08:23:14.187315 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82ea276a-fbd5-4fea-a8a3-521ba112c7a1-catalog-content\") pod \"82ea276a-fbd5-4fea-a8a3-521ba112c7a1\" (UID: \"82ea276a-fbd5-4fea-a8a3-521ba112c7a1\") " Nov 22 08:23:14 crc kubenswrapper[4929]: I1122 08:23:14.187662 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jk8jn\" (UniqueName: \"kubernetes.io/projected/82ea276a-fbd5-4fea-a8a3-521ba112c7a1-kube-api-access-jk8jn\") pod \"82ea276a-fbd5-4fea-a8a3-521ba112c7a1\" (UID: \"82ea276a-fbd5-4fea-a8a3-521ba112c7a1\") " Nov 22 08:23:14 crc kubenswrapper[4929]: I1122 08:23:14.187674 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82ea276a-fbd5-4fea-a8a3-521ba112c7a1-utilities" (OuterVolumeSpecName: "utilities") pod "82ea276a-fbd5-4fea-a8a3-521ba112c7a1" (UID: "82ea276a-fbd5-4fea-a8a3-521ba112c7a1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:23:14 crc kubenswrapper[4929]: I1122 08:23:14.188742 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/82ea276a-fbd5-4fea-a8a3-521ba112c7a1-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 08:23:14 crc kubenswrapper[4929]: I1122 08:23:14.194062 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82ea276a-fbd5-4fea-a8a3-521ba112c7a1-kube-api-access-jk8jn" (OuterVolumeSpecName: "kube-api-access-jk8jn") pod "82ea276a-fbd5-4fea-a8a3-521ba112c7a1" (UID: "82ea276a-fbd5-4fea-a8a3-521ba112c7a1"). InnerVolumeSpecName "kube-api-access-jk8jn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 08:23:14 crc kubenswrapper[4929]: I1122 08:23:14.254555 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/82ea276a-fbd5-4fea-a8a3-521ba112c7a1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "82ea276a-fbd5-4fea-a8a3-521ba112c7a1" (UID: "82ea276a-fbd5-4fea-a8a3-521ba112c7a1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:23:14 crc kubenswrapper[4929]: I1122 08:23:14.290736 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/82ea276a-fbd5-4fea-a8a3-521ba112c7a1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 08:23:14 crc kubenswrapper[4929]: I1122 08:23:14.291014 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jk8jn\" (UniqueName: \"kubernetes.io/projected/82ea276a-fbd5-4fea-a8a3-521ba112c7a1-kube-api-access-jk8jn\") on node \"crc\" DevicePath \"\"" Nov 22 08:23:14 crc kubenswrapper[4929]: I1122 08:23:14.412131 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8zg7s"] Nov 22 08:23:14 crc kubenswrapper[4929]: I1122 08:23:14.421733 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8zg7s"] Nov 22 08:23:15 crc kubenswrapper[4929]: I1122 08:23:15.082549 4929 generic.go:334] "Generic (PLEG): container finished" podID="d40dd099-35ca-4180-a452-d1be8727c784" containerID="0fee8094f66c97ff7119c016d60e99105770a8bd1f3cc9270d61352607f6ab8a" exitCode=0 Nov 22 08:23:15 crc kubenswrapper[4929]: I1122 08:23:15.082913 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-66bzw" event={"ID":"d40dd099-35ca-4180-a452-d1be8727c784","Type":"ContainerDied","Data":"0fee8094f66c97ff7119c016d60e99105770a8bd1f3cc9270d61352607f6ab8a"} Nov 22 08:23:15 crc kubenswrapper[4929]: I1122 08:23:15.959858 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82ea276a-fbd5-4fea-a8a3-521ba112c7a1" path="/var/lib/kubelet/pods/82ea276a-fbd5-4fea-a8a3-521ba112c7a1/volumes" Nov 22 08:23:17 crc kubenswrapper[4929]: I1122 08:23:17.105537 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-66bzw" event={"ID":"d40dd099-35ca-4180-a452-d1be8727c784","Type":"ContainerStarted","Data":"cd72929b6c003b824c16ed3a6c807fb83ae839a4c1cddbfc5e6561437b1c6e50"} Nov 22 08:23:17 crc kubenswrapper[4929]: I1122 08:23:17.129067 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-66bzw" podStartSLOduration=7.047101152 podStartE2EDuration="11.129037858s" podCreationTimestamp="2025-11-22 08:23:06 +0000 UTC" firstStartedPulling="2025-11-22 08:23:12.041875676 +0000 UTC m=+4329.151329689" lastFinishedPulling="2025-11-22 08:23:16.123812382 +0000 UTC m=+4333.233266395" observedRunningTime="2025-11-22 08:23:17.120931537 +0000 UTC m=+4334.230385550" watchObservedRunningTime="2025-11-22 08:23:17.129037858 +0000 UTC m=+4334.238491871" Nov 22 08:23:18 crc kubenswrapper[4929]: I1122 08:23:18.594840 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 08:23:18 crc kubenswrapper[4929]: I1122 08:23:18.594929 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 08:23:22 crc kubenswrapper[4929]: I1122 08:23:22.928685 4929 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bbklg" Nov 22 08:23:22 crc kubenswrapper[4929]: I1122 08:23:22.929345 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bbklg" Nov 22 08:23:22 crc kubenswrapper[4929]: I1122 08:23:22.973576 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bbklg" Nov 22 08:23:23 crc kubenswrapper[4929]: I1122 08:23:23.215798 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bbklg" Nov 22 08:23:24 crc kubenswrapper[4929]: I1122 08:23:24.992063 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bbklg"] Nov 22 08:23:25 crc kubenswrapper[4929]: I1122 08:23:25.182045 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bbklg" podUID="6bc0d329-bdc0-48fb-b847-e42a068e6c6e" containerName="registry-server" containerID="cri-o://58670cd326b103f250ce6e34e9af3f19dc95fac2ba8d494c9595be450223d3a0" gracePeriod=2 Nov 22 08:23:26 crc kubenswrapper[4929]: I1122 08:23:26.193358 4929 generic.go:334] "Generic (PLEG): container finished" podID="6bc0d329-bdc0-48fb-b847-e42a068e6c6e" containerID="58670cd326b103f250ce6e34e9af3f19dc95fac2ba8d494c9595be450223d3a0" exitCode=0 Nov 22 08:23:26 crc kubenswrapper[4929]: I1122 08:23:26.193427 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bbklg" event={"ID":"6bc0d329-bdc0-48fb-b847-e42a068e6c6e","Type":"ContainerDied","Data":"58670cd326b103f250ce6e34e9af3f19dc95fac2ba8d494c9595be450223d3a0"} Nov 22 08:23:26 crc kubenswrapper[4929]: I1122 08:23:26.532685 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-66bzw" Nov 22 08:23:26 crc kubenswrapper[4929]: I1122 08:23:26.532741 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-66bzw" Nov 22 08:23:26 crc kubenswrapper[4929]: I1122 08:23:26.593088 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-66bzw" Nov 22 08:23:26 crc kubenswrapper[4929]: I1122 08:23:26.688810 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bbklg" Nov 22 08:23:26 crc kubenswrapper[4929]: I1122 08:23:26.852462 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cq8kk\" (UniqueName: \"kubernetes.io/projected/6bc0d329-bdc0-48fb-b847-e42a068e6c6e-kube-api-access-cq8kk\") pod \"6bc0d329-bdc0-48fb-b847-e42a068e6c6e\" (UID: \"6bc0d329-bdc0-48fb-b847-e42a068e6c6e\") " Nov 22 08:23:26 crc kubenswrapper[4929]: I1122 08:23:26.852576 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bc0d329-bdc0-48fb-b847-e42a068e6c6e-utilities\") pod \"6bc0d329-bdc0-48fb-b847-e42a068e6c6e\" (UID: \"6bc0d329-bdc0-48fb-b847-e42a068e6c6e\") " Nov 22 08:23:26 crc kubenswrapper[4929]: I1122 08:23:26.852627 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bc0d329-bdc0-48fb-b847-e42a068e6c6e-catalog-content\") pod \"6bc0d329-bdc0-48fb-b847-e42a068e6c6e\" (UID: \"6bc0d329-bdc0-48fb-b847-e42a068e6c6e\") " Nov 22 08:23:26 crc kubenswrapper[4929]: I1122 08:23:26.853470 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6bc0d329-bdc0-48fb-b847-e42a068e6c6e-utilities" (OuterVolumeSpecName: "utilities") pod "6bc0d329-bdc0-48fb-b847-e42a068e6c6e" (UID: "6bc0d329-bdc0-48fb-b847-e42a068e6c6e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:23:26 crc kubenswrapper[4929]: I1122 08:23:26.861599 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6bc0d329-bdc0-48fb-b847-e42a068e6c6e-kube-api-access-cq8kk" (OuterVolumeSpecName: "kube-api-access-cq8kk") pod "6bc0d329-bdc0-48fb-b847-e42a068e6c6e" (UID: "6bc0d329-bdc0-48fb-b847-e42a068e6c6e"). InnerVolumeSpecName "kube-api-access-cq8kk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 08:23:26 crc kubenswrapper[4929]: I1122 08:23:26.953752 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6bc0d329-bdc0-48fb-b847-e42a068e6c6e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6bc0d329-bdc0-48fb-b847-e42a068e6c6e" (UID: "6bc0d329-bdc0-48fb-b847-e42a068e6c6e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:23:26 crc kubenswrapper[4929]: I1122 08:23:26.954819 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cq8kk\" (UniqueName: \"kubernetes.io/projected/6bc0d329-bdc0-48fb-b847-e42a068e6c6e-kube-api-access-cq8kk\") on node \"crc\" DevicePath \"\"" Nov 22 08:23:26 crc kubenswrapper[4929]: I1122 08:23:26.954887 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6bc0d329-bdc0-48fb-b847-e42a068e6c6e-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 08:23:26 crc kubenswrapper[4929]: I1122 08:23:26.954901 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6bc0d329-bdc0-48fb-b847-e42a068e6c6e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 08:23:27 crc kubenswrapper[4929]: I1122 08:23:27.212561 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bbklg" event={"ID":"6bc0d329-bdc0-48fb-b847-e42a068e6c6e","Type":"ContainerDied","Data":"4f01b59ac2ae0e33669858f9fa9afe203c81bd5d6afabcb58c710ff4424a89c7"} Nov 22 08:23:27 crc kubenswrapper[4929]: I1122 08:23:27.212639 4929 scope.go:117] "RemoveContainer" containerID="58670cd326b103f250ce6e34e9af3f19dc95fac2ba8d494c9595be450223d3a0" Nov 22 08:23:27 crc kubenswrapper[4929]: I1122 08:23:27.212671 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bbklg" Nov 22 08:23:27 crc kubenswrapper[4929]: I1122 08:23:27.257018 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bbklg"] Nov 22 08:23:27 crc kubenswrapper[4929]: I1122 08:23:27.265837 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bbklg"] Nov 22 08:23:27 crc kubenswrapper[4929]: I1122 08:23:27.268536 4929 scope.go:117] "RemoveContainer" containerID="00aa56c54a4dc83b71c4dffce9c1a3ba13795713da3358da410f603a8485c264" Nov 22 08:23:27 crc kubenswrapper[4929]: I1122 08:23:27.291817 4929 scope.go:117] "RemoveContainer" containerID="ab2a7e68de963ff25a6a822282641d98519975bae6dd649e8faa6107a106b45e" Nov 22 08:23:27 crc kubenswrapper[4929]: I1122 08:23:27.303393 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-66bzw" Nov 22 08:23:27 crc kubenswrapper[4929]: I1122 08:23:27.959189 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6bc0d329-bdc0-48fb-b847-e42a068e6c6e" path="/var/lib/kubelet/pods/6bc0d329-bdc0-48fb-b847-e42a068e6c6e/volumes" Nov 22 08:23:31 crc kubenswrapper[4929]: I1122 08:23:31.392479 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-66bzw"] Nov 22 08:23:31 crc kubenswrapper[4929]: I1122 08:23:31.393301 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-66bzw" podUID="d40dd099-35ca-4180-a452-d1be8727c784" containerName="registry-server" containerID="cri-o://cd72929b6c003b824c16ed3a6c807fb83ae839a4c1cddbfc5e6561437b1c6e50" gracePeriod=2 Nov 22 08:23:32 crc kubenswrapper[4929]: I1122 08:23:32.268394 4929 generic.go:334] "Generic (PLEG): container finished" podID="d40dd099-35ca-4180-a452-d1be8727c784" containerID="cd72929b6c003b824c16ed3a6c807fb83ae839a4c1cddbfc5e6561437b1c6e50" exitCode=0 Nov 22 08:23:32 crc kubenswrapper[4929]: I1122 
08:23:32.271551 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-66bzw" event={"ID":"d40dd099-35ca-4180-a452-d1be8727c784","Type":"ContainerDied","Data":"cd72929b6c003b824c16ed3a6c807fb83ae839a4c1cddbfc5e6561437b1c6e50"} Nov 22 08:23:32 crc kubenswrapper[4929]: I1122 08:23:32.658507 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-66bzw" Nov 22 08:23:32 crc kubenswrapper[4929]: I1122 08:23:32.779874 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d40dd099-35ca-4180-a452-d1be8727c784-utilities\") pod \"d40dd099-35ca-4180-a452-d1be8727c784\" (UID: \"d40dd099-35ca-4180-a452-d1be8727c784\") " Nov 22 08:23:32 crc kubenswrapper[4929]: I1122 08:23:32.779936 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fhv88\" (UniqueName: \"kubernetes.io/projected/d40dd099-35ca-4180-a452-d1be8727c784-kube-api-access-fhv88\") pod \"d40dd099-35ca-4180-a452-d1be8727c784\" (UID: \"d40dd099-35ca-4180-a452-d1be8727c784\") " Nov 22 08:23:32 crc kubenswrapper[4929]: I1122 08:23:32.779998 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d40dd099-35ca-4180-a452-d1be8727c784-catalog-content\") pod \"d40dd099-35ca-4180-a452-d1be8727c784\" (UID: \"d40dd099-35ca-4180-a452-d1be8727c784\") " Nov 22 08:23:32 crc kubenswrapper[4929]: I1122 08:23:32.780762 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d40dd099-35ca-4180-a452-d1be8727c784-utilities" (OuterVolumeSpecName: "utilities") pod "d40dd099-35ca-4180-a452-d1be8727c784" (UID: "d40dd099-35ca-4180-a452-d1be8727c784"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:23:32 crc kubenswrapper[4929]: I1122 08:23:32.799145 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d40dd099-35ca-4180-a452-d1be8727c784-kube-api-access-fhv88" (OuterVolumeSpecName: "kube-api-access-fhv88") pod "d40dd099-35ca-4180-a452-d1be8727c784" (UID: "d40dd099-35ca-4180-a452-d1be8727c784"). InnerVolumeSpecName "kube-api-access-fhv88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 08:23:32 crc kubenswrapper[4929]: I1122 08:23:32.800360 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d40dd099-35ca-4180-a452-d1be8727c784-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d40dd099-35ca-4180-a452-d1be8727c784" (UID: "d40dd099-35ca-4180-a452-d1be8727c784"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:23:32 crc kubenswrapper[4929]: I1122 08:23:32.882881 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d40dd099-35ca-4180-a452-d1be8727c784-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 08:23:32 crc kubenswrapper[4929]: I1122 08:23:32.882934 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fhv88\" (UniqueName: \"kubernetes.io/projected/d40dd099-35ca-4180-a452-d1be8727c784-kube-api-access-fhv88\") on node \"crc\" DevicePath \"\"" Nov 22 08:23:32 crc kubenswrapper[4929]: I1122 08:23:32.882948 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d40dd099-35ca-4180-a452-d1be8727c784-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 08:23:33 crc kubenswrapper[4929]: I1122 08:23:33.285096 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-66bzw" event={"ID":"d40dd099-35ca-4180-a452-d1be8727c784","Type":"ContainerDied","Data":"f704d16da04c5654ba99f04eb95f6b8b41701e7248caa3925743b3c347597937"} Nov 22 08:23:33 crc kubenswrapper[4929]: I1122 08:23:33.285152 4929 scope.go:117] "RemoveContainer" containerID="cd72929b6c003b824c16ed3a6c807fb83ae839a4c1cddbfc5e6561437b1c6e50" Nov 22 08:23:33 crc kubenswrapper[4929]: I1122 08:23:33.285183 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-66bzw" Nov 22 08:23:33 crc kubenswrapper[4929]: I1122 08:23:33.307820 4929 scope.go:117] "RemoveContainer" containerID="0fee8094f66c97ff7119c016d60e99105770a8bd1f3cc9270d61352607f6ab8a" Nov 22 08:23:33 crc kubenswrapper[4929]: I1122 08:23:33.322942 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-66bzw"] Nov 22 08:23:33 crc kubenswrapper[4929]: I1122 08:23:33.338056 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-66bzw"] Nov 22 08:23:33 crc kubenswrapper[4929]: I1122 08:23:33.353358 4929 scope.go:117] "RemoveContainer" containerID="103c342178726d84489e38b0ed4b74b7607d592ded3bed629ee5925ea3f1943b" Nov 22 08:23:33 crc kubenswrapper[4929]: I1122 08:23:33.958139 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d40dd099-35ca-4180-a452-d1be8727c784" path="/var/lib/kubelet/pods/d40dd099-35ca-4180-a452-d1be8727c784/volumes" Nov 22 08:23:48 crc kubenswrapper[4929]: I1122 08:23:48.594884 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 08:23:48 crc kubenswrapper[4929]: I1122 08:23:48.595482 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 08:23:48 crc kubenswrapper[4929]: I1122 08:23:48.595530 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 08:23:48 crc kubenswrapper[4929]: I1122 08:23:48.596295 4929 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"043df286f9fe05344f4647284c184435999e8b795f9ba5320e307d411ae8301b"} pod="openshift-machine-config-operator/machine-config-daemon-dssfx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 08:23:48 crc kubenswrapper[4929]: I1122 08:23:48.596342 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" containerID="cri-o://043df286f9fe05344f4647284c184435999e8b795f9ba5320e307d411ae8301b" gracePeriod=600 Nov 22 08:23:49 crc kubenswrapper[4929]: I1122 08:23:49.439863 4929 generic.go:334] "Generic (PLEG): container finished" podID="470531cb-120c-48d9-80e1-adf074cf3055" containerID="043df286f9fe05344f4647284c184435999e8b795f9ba5320e307d411ae8301b" exitCode=0 Nov 22 08:23:49 crc kubenswrapper[4929]: I1122 08:23:49.439950 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerDied","Data":"043df286f9fe05344f4647284c184435999e8b795f9ba5320e307d411ae8301b"} Nov 22 08:23:49 crc kubenswrapper[4929]: I1122 08:23:49.440439 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53"} Nov 22 08:23:49 crc kubenswrapper[4929]: I1122 08:23:49.440466 4929 scope.go:117] "RemoveContainer" containerID="08527a930710aef5b4b1cbe97d1fc8d099be0892c50e98086ec5cbd08f15ea0f" Nov 22 08:26:18 crc kubenswrapper[4929]: I1122 08:26:18.595000 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 08:26:18 crc kubenswrapper[4929]: I1122 08:26:18.595593 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 08:26:42 crc kubenswrapper[4929]: I1122 08:26:42.962172 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-z99vt"] Nov 22 08:26:42 crc kubenswrapper[4929]: E1122 08:26:42.963099 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82ea276a-fbd5-4fea-a8a3-521ba112c7a1" containerName="registry-server" Nov 22 08:26:42 crc kubenswrapper[4929]: I1122 08:26:42.963116 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="82ea276a-fbd5-4fea-a8a3-521ba112c7a1" containerName="registry-server" Nov 22 08:26:42 crc kubenswrapper[4929]: E1122 08:26:42.963127 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d40dd099-35ca-4180-a452-d1be8727c784" containerName="registry-server" Nov 22 08:26:42 crc kubenswrapper[4929]: I1122 08:26:42.963133 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="d40dd099-35ca-4180-a452-d1be8727c784" 
containerName="registry-server" Nov 22 08:26:42 crc kubenswrapper[4929]: E1122 08:26:42.963144 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bc0d329-bdc0-48fb-b847-e42a068e6c6e" containerName="extract-utilities" Nov 22 08:26:42 crc kubenswrapper[4929]: I1122 08:26:42.963153 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bc0d329-bdc0-48fb-b847-e42a068e6c6e" containerName="extract-utilities" Nov 22 08:26:42 crc kubenswrapper[4929]: E1122 08:26:42.963170 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bc0d329-bdc0-48fb-b847-e42a068e6c6e" containerName="extract-content" Nov 22 08:26:42 crc kubenswrapper[4929]: I1122 08:26:42.963177 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bc0d329-bdc0-48fb-b847-e42a068e6c6e" containerName="extract-content" Nov 22 08:26:42 crc kubenswrapper[4929]: E1122 08:26:42.963189 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82ea276a-fbd5-4fea-a8a3-521ba112c7a1" containerName="extract-utilities" Nov 22 08:26:42 crc kubenswrapper[4929]: I1122 08:26:42.963196 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="82ea276a-fbd5-4fea-a8a3-521ba112c7a1" containerName="extract-utilities" Nov 22 08:26:42 crc kubenswrapper[4929]: E1122 08:26:42.963229 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d40dd099-35ca-4180-a452-d1be8727c784" containerName="extract-utilities" Nov 22 08:26:42 crc kubenswrapper[4929]: I1122 08:26:42.963241 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="d40dd099-35ca-4180-a452-d1be8727c784" containerName="extract-utilities" Nov 22 08:26:42 crc kubenswrapper[4929]: E1122 08:26:42.963275 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82ea276a-fbd5-4fea-a8a3-521ba112c7a1" containerName="extract-content" Nov 22 08:26:42 crc kubenswrapper[4929]: I1122 08:26:42.963282 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="82ea276a-fbd5-4fea-a8a3-521ba112c7a1" containerName="extract-content" Nov 22 08:26:42 crc kubenswrapper[4929]: E1122 08:26:42.963292 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d40dd099-35ca-4180-a452-d1be8727c784" containerName="extract-content" Nov 22 08:26:42 crc kubenswrapper[4929]: I1122 08:26:42.963297 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="d40dd099-35ca-4180-a452-d1be8727c784" containerName="extract-content" Nov 22 08:26:42 crc kubenswrapper[4929]: E1122 08:26:42.963310 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bc0d329-bdc0-48fb-b847-e42a068e6c6e" containerName="registry-server" Nov 22 08:26:42 crc kubenswrapper[4929]: I1122 08:26:42.963316 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bc0d329-bdc0-48fb-b847-e42a068e6c6e" containerName="registry-server" Nov 22 08:26:42 crc kubenswrapper[4929]: I1122 08:26:42.963487 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="82ea276a-fbd5-4fea-a8a3-521ba112c7a1" containerName="registry-server" Nov 22 08:26:42 crc kubenswrapper[4929]: I1122 08:26:42.963501 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="d40dd099-35ca-4180-a452-d1be8727c784" containerName="registry-server" Nov 22 08:26:42 crc kubenswrapper[4929]: I1122 08:26:42.963519 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="6bc0d329-bdc0-48fb-b847-e42a068e6c6e" containerName="registry-server" Nov 22 08:26:42 crc kubenswrapper[4929]: I1122 08:26:42.964979 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z99vt" Nov 22 08:26:43 crc kubenswrapper[4929]: I1122 08:26:43.033259 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z99vt"] Nov 22 08:26:43 crc kubenswrapper[4929]: I1122 08:26:43.102133 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/349c59ff-8e59-4a29-b2d9-e02ba0ef7a18-utilities\") pod \"certified-operators-z99vt\" (UID: \"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18\") " pod="openshift-marketplace/certified-operators-z99vt" Nov 22 08:26:43 crc kubenswrapper[4929]: I1122 08:26:43.102231 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/349c59ff-8e59-4a29-b2d9-e02ba0ef7a18-catalog-content\") pod \"certified-operators-z99vt\" (UID: \"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18\") " pod="openshift-marketplace/certified-operators-z99vt" Nov 22 08:26:43 crc kubenswrapper[4929]: I1122 08:26:43.102482 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-br628\" (UniqueName: \"kubernetes.io/projected/349c59ff-8e59-4a29-b2d9-e02ba0ef7a18-kube-api-access-br628\") pod \"certified-operators-z99vt\" (UID: \"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18\") " pod="openshift-marketplace/certified-operators-z99vt" Nov 22 08:26:43 crc kubenswrapper[4929]: I1122 08:26:43.204249 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/349c59ff-8e59-4a29-b2d9-e02ba0ef7a18-utilities\") pod \"certified-operators-z99vt\" (UID: \"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18\") " pod="openshift-marketplace/certified-operators-z99vt" Nov 22 08:26:43 crc kubenswrapper[4929]: I1122 08:26:43.204379 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/349c59ff-8e59-4a29-b2d9-e02ba0ef7a18-catalog-content\") pod \"certified-operators-z99vt\" (UID: \"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18\") " pod="openshift-marketplace/certified-operators-z99vt" Nov 22 08:26:43 crc kubenswrapper[4929]: I1122 08:26:43.204487 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-br628\" (UniqueName: \"kubernetes.io/projected/349c59ff-8e59-4a29-b2d9-e02ba0ef7a18-kube-api-access-br628\") pod \"certified-operators-z99vt\" (UID: \"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18\") " pod="openshift-marketplace/certified-operators-z99vt" Nov 22 08:26:43 crc kubenswrapper[4929]: I1122 08:26:43.204919 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/349c59ff-8e59-4a29-b2d9-e02ba0ef7a18-utilities\") pod \"certified-operators-z99vt\" (UID: \"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18\") " pod="openshift-marketplace/certified-operators-z99vt" Nov 22 08:26:43 crc kubenswrapper[4929]: I1122 08:26:43.204983 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/349c59ff-8e59-4a29-b2d9-e02ba0ef7a18-catalog-content\") pod \"certified-operators-z99vt\" (UID: \"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18\") " pod="openshift-marketplace/certified-operators-z99vt" Nov 22 08:26:43 crc kubenswrapper[4929]: I1122 08:26:43.228180 4929 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-br628\" (UniqueName: \"kubernetes.io/projected/349c59ff-8e59-4a29-b2d9-e02ba0ef7a18-kube-api-access-br628\") pod \"certified-operators-z99vt\" (UID: \"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18\") " pod="openshift-marketplace/certified-operators-z99vt" Nov 22 08:26:43 crc kubenswrapper[4929]: I1122 08:26:43.292747 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z99vt" Nov 22 08:26:43 crc kubenswrapper[4929]: I1122 08:26:43.876196 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z99vt"] Nov 22 08:26:44 crc kubenswrapper[4929]: I1122 08:26:44.217002 4929 generic.go:334] "Generic (PLEG): container finished" podID="349c59ff-8e59-4a29-b2d9-e02ba0ef7a18" containerID="b88d8f19731115895454aba7e0557910112363bb89b8cd391fe061d8f92723cc" exitCode=0 Nov 22 08:26:44 crc kubenswrapper[4929]: I1122 08:26:44.217127 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z99vt" event={"ID":"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18","Type":"ContainerDied","Data":"b88d8f19731115895454aba7e0557910112363bb89b8cd391fe061d8f92723cc"} Nov 22 08:26:44 crc kubenswrapper[4929]: I1122 08:26:44.217375 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z99vt" event={"ID":"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18","Type":"ContainerStarted","Data":"5011c690a8cf34044c44059d1b028fd079fddc0fde68db6cc93611366b00f942"} Nov 22 08:26:45 crc kubenswrapper[4929]: I1122 08:26:45.227877 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z99vt" event={"ID":"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18","Type":"ContainerStarted","Data":"a08eb6f0524f89fc2251f2dc64bc0444ed67aeb4b2c3114e3dc316a309f84a4a"} Nov 22 08:26:45 crc kubenswrapper[4929]: E1122 08:26:45.591517 4929 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod349c59ff_8e59_4a29_b2d9_e02ba0ef7a18.slice/crio-a08eb6f0524f89fc2251f2dc64bc0444ed67aeb4b2c3114e3dc316a309f84a4a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod349c59ff_8e59_4a29_b2d9_e02ba0ef7a18.slice/crio-conmon-a08eb6f0524f89fc2251f2dc64bc0444ed67aeb4b2c3114e3dc316a309f84a4a.scope\": RecentStats: unable to find data in memory cache]" Nov 22 08:26:46 crc kubenswrapper[4929]: I1122 08:26:46.237425 4929 generic.go:334] "Generic (PLEG): container finished" podID="349c59ff-8e59-4a29-b2d9-e02ba0ef7a18" containerID="a08eb6f0524f89fc2251f2dc64bc0444ed67aeb4b2c3114e3dc316a309f84a4a" exitCode=0 Nov 22 08:26:46 crc kubenswrapper[4929]: I1122 08:26:46.237471 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z99vt" event={"ID":"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18","Type":"ContainerDied","Data":"a08eb6f0524f89fc2251f2dc64bc0444ed67aeb4b2c3114e3dc316a309f84a4a"} Nov 22 08:26:47 crc kubenswrapper[4929]: I1122 08:26:47.255387 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z99vt" event={"ID":"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18","Type":"ContainerStarted","Data":"d7666b686a5fe229c39817c6e8f4a3117b8a7f9bdfbd71f5f36589ece0e0ed08"} Nov 22 08:26:47 crc kubenswrapper[4929]: I1122 08:26:47.280788 4929 
Nov 22 08:26:48 crc kubenswrapper[4929]: I1122 08:26:48.594769 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 08:26:48 crc kubenswrapper[4929]: I1122 08:26:48.595129 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 08:26:53 crc kubenswrapper[4929]: I1122 08:26:53.294092 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-z99vt"
Nov 22 08:26:53 crc kubenswrapper[4929]: I1122 08:26:53.294677 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-z99vt"
Nov 22 08:26:53 crc kubenswrapper[4929]: I1122 08:26:53.437560 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-z99vt"
Nov 22 08:26:53 crc kubenswrapper[4929]: I1122 08:26:53.489690 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-z99vt"
Nov 22 08:26:53 crc kubenswrapper[4929]: I1122 08:26:53.676523 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z99vt"]
Nov 22 08:26:55 crc kubenswrapper[4929]: I1122 08:26:55.337077 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-z99vt" podUID="349c59ff-8e59-4a29-b2d9-e02ba0ef7a18" containerName="registry-server" containerID="cri-o://d7666b686a5fe229c39817c6e8f4a3117b8a7f9bdfbd71f5f36589ece0e0ed08" gracePeriod=2
Nov 22 08:26:56 crc kubenswrapper[4929]: I1122 08:26:56.350041 4929 generic.go:334] "Generic (PLEG): container finished" podID="349c59ff-8e59-4a29-b2d9-e02ba0ef7a18" containerID="d7666b686a5fe229c39817c6e8f4a3117b8a7f9bdfbd71f5f36589ece0e0ed08" exitCode=0
Nov 22 08:26:56 crc kubenswrapper[4929]: I1122 08:26:56.350158 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z99vt" event={"ID":"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18","Type":"ContainerDied","Data":"d7666b686a5fe229c39817c6e8f4a3117b8a7f9bdfbd71f5f36589ece0e0ed08"}
Nov 22 08:26:56 crc kubenswrapper[4929]: I1122 08:26:56.350428 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z99vt" event={"ID":"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18","Type":"ContainerDied","Data":"5011c690a8cf34044c44059d1b028fd079fddc0fde68db6cc93611366b00f942"}
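
The sequence above is a clean graceful termination: the API DELETE arrives, the kubelet signals the registry-server container with a 2-second grace period, and the process exits before any escalation to SIGKILL, which is why the PLEG reports exitCode=0. From the container's side that looks roughly like this purely illustrative sketch:

    package main

    import (
        "fmt"
        "os"
        "os/signal"
        "syscall"
        "time"
    )

    func main() {
        term := make(chan os.Signal, 1)
        signal.Notify(term, syscall.SIGTERM)

        select {
        case <-term:
            fmt.Println("SIGTERM received; shutting down")
            // ...flush state, close listeners...
            os.Exit(0) // reported by the PLEG as exitCode=0
        case <-time.After(time.Hour):
            // normal serving would continue here; the sketch just waits
        }
    }
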
Nov 22 08:26:56 crc kubenswrapper[4929]: I1122 08:26:56.350446 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5011c690a8cf34044c44059d1b028fd079fddc0fde68db6cc93611366b00f942"
Nov 22 08:26:56 crc kubenswrapper[4929]: I1122 08:26:56.351029 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z99vt"
Nov 22 08:26:56 crc kubenswrapper[4929]: I1122 08:26:56.363253 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/349c59ff-8e59-4a29-b2d9-e02ba0ef7a18-utilities\") pod \"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18\" (UID: \"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18\") "
Nov 22 08:26:56 crc kubenswrapper[4929]: I1122 08:26:56.363561 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-br628\" (UniqueName: \"kubernetes.io/projected/349c59ff-8e59-4a29-b2d9-e02ba0ef7a18-kube-api-access-br628\") pod \"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18\" (UID: \"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18\") "
Nov 22 08:26:56 crc kubenswrapper[4929]: I1122 08:26:56.363773 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/349c59ff-8e59-4a29-b2d9-e02ba0ef7a18-catalog-content\") pod \"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18\" (UID: \"349c59ff-8e59-4a29-b2d9-e02ba0ef7a18\") "
Nov 22 08:26:56 crc kubenswrapper[4929]: I1122 08:26:56.365766 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/349c59ff-8e59-4a29-b2d9-e02ba0ef7a18-utilities" (OuterVolumeSpecName: "utilities") pod "349c59ff-8e59-4a29-b2d9-e02ba0ef7a18" (UID: "349c59ff-8e59-4a29-b2d9-e02ba0ef7a18"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 08:26:56 crc kubenswrapper[4929]: I1122 08:26:56.371860 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/349c59ff-8e59-4a29-b2d9-e02ba0ef7a18-kube-api-access-br628" (OuterVolumeSpecName: "kube-api-access-br628") pod "349c59ff-8e59-4a29-b2d9-e02ba0ef7a18" (UID: "349c59ff-8e59-4a29-b2d9-e02ba0ef7a18"). InnerVolumeSpecName "kube-api-access-br628". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 08:26:56 crc kubenswrapper[4929]: I1122 08:26:56.417430 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/349c59ff-8e59-4a29-b2d9-e02ba0ef7a18-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "349c59ff-8e59-4a29-b2d9-e02ba0ef7a18" (UID: "349c59ff-8e59-4a29-b2d9-e02ba0ef7a18"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 08:26:56 crc kubenswrapper[4929]: I1122 08:26:56.465866 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-br628\" (UniqueName: \"kubernetes.io/projected/349c59ff-8e59-4a29-b2d9-e02ba0ef7a18-kube-api-access-br628\") on node \"crc\" DevicePath \"\""
Nov 22 08:26:56 crc kubenswrapper[4929]: I1122 08:26:56.465917 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/349c59ff-8e59-4a29-b2d9-e02ba0ef7a18-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 08:26:56 crc kubenswrapper[4929]: I1122 08:26:56.465932 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/349c59ff-8e59-4a29-b2d9-e02ba0ef7a18-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 08:26:57 crc kubenswrapper[4929]: I1122 08:26:57.359497 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z99vt"
Nov 22 08:26:57 crc kubenswrapper[4929]: I1122 08:26:57.401421 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z99vt"]
Nov 22 08:26:57 crc kubenswrapper[4929]: I1122 08:26:57.413839 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-z99vt"]
Nov 22 08:26:57 crc kubenswrapper[4929]: I1122 08:26:57.960798 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="349c59ff-8e59-4a29-b2d9-e02ba0ef7a18" path="/var/lib/kubelet/pods/349c59ff-8e59-4a29-b2d9-e02ba0ef7a18/volumes"
Nov 22 08:27:18 crc kubenswrapper[4929]: I1122 08:27:18.594307 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 08:27:18 crc kubenswrapper[4929]: I1122 08:27:18.594919 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 08:27:18 crc kubenswrapper[4929]: I1122 08:27:18.594964 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dssfx"
Nov 22 08:27:18 crc kubenswrapper[4929]: I1122 08:27:18.595818 4929 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53"} pod="openshift-machine-config-operator/machine-config-daemon-dssfx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 22 08:27:18 crc kubenswrapper[4929]: I1122 08:27:18.595884 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" containerID="cri-o://72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" gracePeriod=600
Nov 22 08:27:18 crc kubenswrapper[4929]: E1122 08:27:18.776760 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 08:27:19 crc kubenswrapper[4929]: I1122 08:27:19.535577 4929 generic.go:334] "Generic (PLEG): container finished" podID="470531cb-120c-48d9-80e1-adf074cf3055" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" exitCode=0
Nov 22 08:27:19 crc kubenswrapper[4929]: I1122 08:27:19.536022 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerDied","Data":"72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53"}
Nov 22 08:27:19 crc kubenswrapper[4929]: I1122 08:27:19.536307 4929 scope.go:117] "RemoveContainer" containerID="043df286f9fe05344f4647284c184435999e8b795f9ba5320e307d411ae8301b"
Nov 22 08:27:19 crc kubenswrapper[4929]: I1122 08:27:19.536972 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53"
Nov 22 08:27:19 crc kubenswrapper[4929]: E1122 08:27:19.537322 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 08:27:34 crc kubenswrapper[4929]: I1122 08:27:34.947986 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53"
Nov 22 08:27:34 crc kubenswrapper[4929]: E1122 08:27:34.948935 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 08:27:49 crc kubenswrapper[4929]: I1122 08:27:49.947917 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53"
Nov 22 08:27:49 crc kubenswrapper[4929]: E1122 08:27:49.948726 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:28:14 crc kubenswrapper[4929]: I1122 08:28:14.947829 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:28:14 crc kubenswrapper[4929]: E1122 08:28:14.948725 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:28:26 crc kubenswrapper[4929]: I1122 08:28:26.949234 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:28:26 crc kubenswrapper[4929]: E1122 08:28:26.950846 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:28:39 crc kubenswrapper[4929]: I1122 08:28:39.947718 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:28:39 crc kubenswrapper[4929]: E1122 08:28:39.948974 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:28:51 crc kubenswrapper[4929]: I1122 08:28:51.948624 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:28:51 crc kubenswrapper[4929]: E1122 08:28:51.949660 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:29:06 crc kubenswrapper[4929]: I1122 08:29:06.947224 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:29:06 crc kubenswrapper[4929]: E1122 08:29:06.948002 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:29:21 crc kubenswrapper[4929]: I1122 08:29:21.948101 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:29:21 crc kubenswrapper[4929]: E1122 08:29:21.948889 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:29:34 crc kubenswrapper[4929]: I1122 08:29:34.947741 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:29:34 crc kubenswrapper[4929]: E1122 08:29:34.948673 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:29:48 crc kubenswrapper[4929]: I1122 08:29:48.947196 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:29:48 crc kubenswrapper[4929]: E1122 08:29:48.950110 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.146285 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps"] Nov 22 08:30:00 crc kubenswrapper[4929]: E1122 08:30:00.147382 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="349c59ff-8e59-4a29-b2d9-e02ba0ef7a18" containerName="extract-utilities" Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.147407 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="349c59ff-8e59-4a29-b2d9-e02ba0ef7a18" containerName="extract-utilities" Nov 22 08:30:00 crc kubenswrapper[4929]: E1122 08:30:00.147425 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="349c59ff-8e59-4a29-b2d9-e02ba0ef7a18" containerName="extract-content" Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.147434 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="349c59ff-8e59-4a29-b2d9-e02ba0ef7a18" containerName="extract-content" Nov 22 08:30:00 crc kubenswrapper[4929]: E1122 08:30:00.147454 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="349c59ff-8e59-4a29-b2d9-e02ba0ef7a18" containerName="registry-server" Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.147463 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="349c59ff-8e59-4a29-b2d9-e02ba0ef7a18" containerName="registry-server" Nov 22 08:30:00 crc 
Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.147774 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="349c59ff-8e59-4a29-b2d9-e02ba0ef7a18" containerName="registry-server"
Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.148607 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps"
Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.154836 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.155093 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.161941 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps"]
Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.265434 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d-secret-volume\") pod \"collect-profiles-29396670-52mps\" (UID: \"d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps"
Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.265613 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxw87\" (UniqueName: \"kubernetes.io/projected/d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d-kube-api-access-bxw87\") pod \"collect-profiles-29396670-52mps\" (UID: \"d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps"
Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.265673 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d-config-volume\") pod \"collect-profiles-29396670-52mps\" (UID: \"d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps"
Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.367233 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d-secret-volume\") pod \"collect-profiles-29396670-52mps\" (UID: \"d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps"
Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.367430 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxw87\" (UniqueName: \"kubernetes.io/projected/d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d-kube-api-access-bxw87\") pod \"collect-profiles-29396670-52mps\" (UID: \"d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps"
Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.367489 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d-config-volume\") pod \"collect-profiles-29396670-52mps\" (UID: \"d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps"
Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.368463 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d-config-volume\") pod \"collect-profiles-29396670-52mps\" (UID: \"d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps"
Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.373136 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d-secret-volume\") pod \"collect-profiles-29396670-52mps\" (UID: \"d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps"
Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.383844 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxw87\" (UniqueName: \"kubernetes.io/projected/d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d-kube-api-access-bxw87\") pod \"collect-profiles-29396670-52mps\" (UID: \"d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps"
Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.470729 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps"
Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.918944 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps"]
Nov 22 08:30:00 crc kubenswrapper[4929]: I1122 08:30:00.947677 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53"
Nov 22 08:30:00 crc kubenswrapper[4929]: E1122 08:30:00.947930 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 08:30:01 crc kubenswrapper[4929]: I1122 08:30:01.051794 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps" event={"ID":"d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d","Type":"ContainerStarted","Data":"633851aa2ceb38d5d46b200becc8161822c703f0cb865b1691ad69e882c0d675"}
Nov 22 08:30:02 crc kubenswrapper[4929]: I1122 08:30:02.062069 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps" event={"ID":"d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d","Type":"ContainerStarted","Data":"838785e50140799082310c7e2c362318c91a3e48a2ed057ca796e0c3e5ced132"}
Nov 22 08:30:02 crc kubenswrapper[4929]: I1122 08:30:02.083374 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps" podStartSLOduration=2.083355951 podStartE2EDuration="2.083355951s" podCreationTimestamp="2025-11-22 08:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 08:30:02.07844659 +0000 UTC m=+4739.187900603" watchObservedRunningTime="2025-11-22 08:30:02.083355951 +0000 UTC m=+4739.192809964"
observedRunningTime="2025-11-22 08:30:02.07844659 +0000 UTC m=+4739.187900603" watchObservedRunningTime="2025-11-22 08:30:02.083355951 +0000 UTC m=+4739.192809964" Nov 22 08:30:03 crc kubenswrapper[4929]: I1122 08:30:03.092073 4929 generic.go:334] "Generic (PLEG): container finished" podID="d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d" containerID="838785e50140799082310c7e2c362318c91a3e48a2ed057ca796e0c3e5ced132" exitCode=0 Nov 22 08:30:03 crc kubenswrapper[4929]: I1122 08:30:03.092167 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps" event={"ID":"d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d","Type":"ContainerDied","Data":"838785e50140799082310c7e2c362318c91a3e48a2ed057ca796e0c3e5ced132"} Nov 22 08:30:04 crc kubenswrapper[4929]: I1122 08:30:04.431316 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps" Nov 22 08:30:04 crc kubenswrapper[4929]: I1122 08:30:04.566387 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d-config-volume\") pod \"d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d\" (UID: \"d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d\") " Nov 22 08:30:04 crc kubenswrapper[4929]: I1122 08:30:04.566564 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d-secret-volume\") pod \"d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d\" (UID: \"d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d\") " Nov 22 08:30:04 crc kubenswrapper[4929]: I1122 08:30:04.566626 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxw87\" (UniqueName: \"kubernetes.io/projected/d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d-kube-api-access-bxw87\") pod \"d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d\" (UID: \"d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d\") " Nov 22 08:30:04 crc kubenswrapper[4929]: I1122 08:30:04.567084 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d-config-volume" (OuterVolumeSpecName: "config-volume") pod "d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d" (UID: "d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 08:30:04 crc kubenswrapper[4929]: I1122 08:30:04.572292 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d" (UID: "d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 08:30:04 crc kubenswrapper[4929]: I1122 08:30:04.572399 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d-kube-api-access-bxw87" (OuterVolumeSpecName: "kube-api-access-bxw87") pod "d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d" (UID: "d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d"). InnerVolumeSpecName "kube-api-access-bxw87". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 08:30:04 crc kubenswrapper[4929]: I1122 08:30:04.670996 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxw87\" (UniqueName: \"kubernetes.io/projected/d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d-kube-api-access-bxw87\") on node \"crc\" DevicePath \"\"" Nov 22 08:30:04 crc kubenswrapper[4929]: I1122 08:30:04.671054 4929 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 08:30:04 crc kubenswrapper[4929]: I1122 08:30:04.671071 4929 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 08:30:05 crc kubenswrapper[4929]: I1122 08:30:05.111719 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps" event={"ID":"d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d","Type":"ContainerDied","Data":"633851aa2ceb38d5d46b200becc8161822c703f0cb865b1691ad69e882c0d675"} Nov 22 08:30:05 crc kubenswrapper[4929]: I1122 08:30:05.112182 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="633851aa2ceb38d5d46b200becc8161822c703f0cb865b1691ad69e882c0d675" Nov 22 08:30:05 crc kubenswrapper[4929]: I1122 08:30:05.111944 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396670-52mps" Nov 22 08:30:05 crc kubenswrapper[4929]: I1122 08:30:05.157928 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr"] Nov 22 08:30:05 crc kubenswrapper[4929]: I1122 08:30:05.168435 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396625-l5tmr"] Nov 22 08:30:05 crc kubenswrapper[4929]: I1122 08:30:05.960285 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db106667-a593-4ad0-9170-c6c91ae46a7a" path="/var/lib/kubelet/pods/db106667-a593-4ad0-9170-c6c91ae46a7a/volumes" Nov 22 08:30:12 crc kubenswrapper[4929]: I1122 08:30:12.947795 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:30:12 crc kubenswrapper[4929]: E1122 08:30:12.948528 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:30:14 crc kubenswrapper[4929]: I1122 08:30:14.527720 4929 scope.go:117] "RemoveContainer" containerID="fc9cc4960c7c09d4ced9ebd27ba8db8053879773f588a53ca39bf574902e9d8c" Nov 22 08:30:24 crc kubenswrapper[4929]: I1122 08:30:24.947050 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:30:24 crc kubenswrapper[4929]: E1122 08:30:24.947911 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:30:36 crc kubenswrapper[4929]: I1122 08:30:36.948080 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:30:36 crc kubenswrapper[4929]: E1122 08:30:36.949596 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:30:47 crc kubenswrapper[4929]: I1122 08:30:47.948435 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:30:47 crc kubenswrapper[4929]: E1122 08:30:47.949573 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:31:01 crc kubenswrapper[4929]: I1122 08:31:01.947912 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:31:01 crc kubenswrapper[4929]: E1122 08:31:01.948859 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:31:15 crc kubenswrapper[4929]: I1122 08:31:15.948627 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:31:15 crc kubenswrapper[4929]: E1122 08:31:15.949655 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:31:26 crc kubenswrapper[4929]: I1122 08:31:26.947661 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:31:26 crc kubenswrapper[4929]: E1122 08:31:26.948275 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:31:38 crc kubenswrapper[4929]: I1122 08:31:38.947516 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:31:38 crc kubenswrapper[4929]: E1122 08:31:38.948396 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:31:50 crc kubenswrapper[4929]: I1122 08:31:50.947328 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:31:50 crc kubenswrapper[4929]: E1122 08:31:50.948101 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:32:04 crc kubenswrapper[4929]: I1122 08:32:04.948266 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:32:04 crc kubenswrapper[4929]: E1122 08:32:04.949818 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:32:15 crc kubenswrapper[4929]: I1122 08:32:15.949619 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:32:15 crc kubenswrapper[4929]: E1122 08:32:15.951820 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:32:26 crc kubenswrapper[4929]: I1122 08:32:26.947005 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:32:28 crc kubenswrapper[4929]: I1122 08:32:28.459198 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"4ba78b64f79a8e6b71d65feaee6655b928c77e644338723a94ed54163b488f75"} Nov 22 08:33:14 crc kubenswrapper[4929]: I1122 08:33:14.634972 4929 scope.go:117] "RemoveContainer" containerID="d7666b686a5fe229c39817c6e8f4a3117b8a7f9bdfbd71f5f36589ece0e0ed08" Nov 22 08:33:14 crc kubenswrapper[4929]: I1122 
Nov 22 08:33:14 crc kubenswrapper[4929]: I1122 08:33:14.657108 4929 scope.go:117] "RemoveContainer" containerID="b88d8f19731115895454aba7e0557910112363bb89b8cd391fe061d8f92723cc"
Nov 22 08:33:14 crc kubenswrapper[4929]: I1122 08:33:14.677266 4929 scope.go:117] "RemoveContainer" containerID="a08eb6f0524f89fc2251f2dc64bc0444ed67aeb4b2c3114e3dc316a309f84a4a"
Nov 22 08:33:16 crc kubenswrapper[4929]: I1122 08:33:16.264690 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qt2pj"]
Nov 22 08:33:16 crc kubenswrapper[4929]: E1122 08:33:16.265942 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d" containerName="collect-profiles"
Nov 22 08:33:16 crc kubenswrapper[4929]: I1122 08:33:16.266445 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d" containerName="collect-profiles"
Nov 22 08:33:16 crc kubenswrapper[4929]: I1122 08:33:16.266803 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="d50781e9-ec6b-4ab7-9f92-a952d2b9ce1d" containerName="collect-profiles"
Nov 22 08:33:16 crc kubenswrapper[4929]: I1122 08:33:16.269608 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qt2pj"
Nov 22 08:33:16 crc kubenswrapper[4929]: I1122 08:33:16.291040 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qt2pj"]
Nov 22 08:33:16 crc kubenswrapper[4929]: I1122 08:33:16.381479 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70cd88af-5530-44a1-9169-05792c00706f-utilities\") pod \"community-operators-qt2pj\" (UID: \"70cd88af-5530-44a1-9169-05792c00706f\") " pod="openshift-marketplace/community-operators-qt2pj"
Nov 22 08:33:16 crc kubenswrapper[4929]: I1122 08:33:16.381551 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cd4q2\" (UniqueName: \"kubernetes.io/projected/70cd88af-5530-44a1-9169-05792c00706f-kube-api-access-cd4q2\") pod \"community-operators-qt2pj\" (UID: \"70cd88af-5530-44a1-9169-05792c00706f\") " pod="openshift-marketplace/community-operators-qt2pj"
Nov 22 08:33:16 crc kubenswrapper[4929]: I1122 08:33:16.381639 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70cd88af-5530-44a1-9169-05792c00706f-catalog-content\") pod \"community-operators-qt2pj\" (UID: \"70cd88af-5530-44a1-9169-05792c00706f\") " pod="openshift-marketplace/community-operators-qt2pj"
Nov 22 08:33:16 crc kubenswrapper[4929]: I1122 08:33:16.483825 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70cd88af-5530-44a1-9169-05792c00706f-utilities\") pod \"community-operators-qt2pj\" (UID: \"70cd88af-5530-44a1-9169-05792c00706f\") " pod="openshift-marketplace/community-operators-qt2pj"
Nov 22 08:33:16 crc kubenswrapper[4929]: I1122 08:33:16.484167 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cd4q2\" (UniqueName: \"kubernetes.io/projected/70cd88af-5530-44a1-9169-05792c00706f-kube-api-access-cd4q2\") pod \"community-operators-qt2pj\" (UID: \"70cd88af-5530-44a1-9169-05792c00706f\") " pod="openshift-marketplace/community-operators-qt2pj"
Nov 22 08:33:16 crc kubenswrapper[4929]: I1122 08:33:16.484423 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70cd88af-5530-44a1-9169-05792c00706f-utilities\") pod \"community-operators-qt2pj\" (UID: \"70cd88af-5530-44a1-9169-05792c00706f\") " pod="openshift-marketplace/community-operators-qt2pj"
Nov 22 08:33:16 crc kubenswrapper[4929]: I1122 08:33:16.484806 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70cd88af-5530-44a1-9169-05792c00706f-catalog-content\") pod \"community-operators-qt2pj\" (UID: \"70cd88af-5530-44a1-9169-05792c00706f\") " pod="openshift-marketplace/community-operators-qt2pj"
Nov 22 08:33:16 crc kubenswrapper[4929]: I1122 08:33:16.485149 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70cd88af-5530-44a1-9169-05792c00706f-catalog-content\") pod \"community-operators-qt2pj\" (UID: \"70cd88af-5530-44a1-9169-05792c00706f\") " pod="openshift-marketplace/community-operators-qt2pj"
Nov 22 08:33:16 crc kubenswrapper[4929]: I1122 08:33:16.507385 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cd4q2\" (UniqueName: \"kubernetes.io/projected/70cd88af-5530-44a1-9169-05792c00706f-kube-api-access-cd4q2\") pod \"community-operators-qt2pj\" (UID: \"70cd88af-5530-44a1-9169-05792c00706f\") " pod="openshift-marketplace/community-operators-qt2pj"
Nov 22 08:33:16 crc kubenswrapper[4929]: I1122 08:33:16.595229 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qt2pj"
Nov 22 08:33:17 crc kubenswrapper[4929]: I1122 08:33:17.173490 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qt2pj"]
Nov 22 08:33:17 crc kubenswrapper[4929]: I1122 08:33:17.922174 4929 generic.go:334] "Generic (PLEG): container finished" podID="70cd88af-5530-44a1-9169-05792c00706f" containerID="7c0fa7acb5599ff224f33206ce3670ab366b7071a2ad95dbdf134a6c8a94d8a4" exitCode=0
Nov 22 08:33:17 crc kubenswrapper[4929]: I1122 08:33:17.922499 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qt2pj" event={"ID":"70cd88af-5530-44a1-9169-05792c00706f","Type":"ContainerDied","Data":"7c0fa7acb5599ff224f33206ce3670ab366b7071a2ad95dbdf134a6c8a94d8a4"}
Nov 22 08:33:17 crc kubenswrapper[4929]: I1122 08:33:17.922529 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qt2pj" event={"ID":"70cd88af-5530-44a1-9169-05792c00706f","Type":"ContainerStarted","Data":"a072ae6591ca71643fa3c8ce026946568f951e7f80612a59450632c5ca568f95"}
Nov 22 08:33:17 crc kubenswrapper[4929]: I1122 08:33:17.924509 4929 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 22 08:33:18 crc kubenswrapper[4929]: I1122 08:33:18.947436 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qt2pj" event={"ID":"70cd88af-5530-44a1-9169-05792c00706f","Type":"ContainerStarted","Data":"2021662c908a792fef507e8d859b923a49f90718f9e2050477d61b82788bbf98"}
Nov 22 08:33:19 crc kubenswrapper[4929]: I1122 08:33:19.959376 4929 generic.go:334] "Generic (PLEG): container finished" podID="70cd88af-5530-44a1-9169-05792c00706f" containerID="2021662c908a792fef507e8d859b923a49f90718f9e2050477d61b82788bbf98" exitCode=0
Nov 22 08:33:19 crc kubenswrapper[4929]: I1122 08:33:19.960746 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qt2pj" event={"ID":"70cd88af-5530-44a1-9169-05792c00706f","Type":"ContainerDied","Data":"2021662c908a792fef507e8d859b923a49f90718f9e2050477d61b82788bbf98"}
Nov 22 08:33:21 crc kubenswrapper[4929]: I1122 08:33:21.980466 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qt2pj" event={"ID":"70cd88af-5530-44a1-9169-05792c00706f","Type":"ContainerStarted","Data":"4bdc3b1f4e1d7ee4dd87536cc2f33a619d3a915ce7a90612acd90e83618830fe"}
Nov 22 08:33:22 crc kubenswrapper[4929]: I1122 08:33:22.006924 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qt2pj" podStartSLOduration=3.537196303 podStartE2EDuration="6.006897692s" podCreationTimestamp="2025-11-22 08:33:16 +0000 UTC" firstStartedPulling="2025-11-22 08:33:17.924290571 +0000 UTC m=+4935.033744584" lastFinishedPulling="2025-11-22 08:33:20.39399195 +0000 UTC m=+4937.503445973" observedRunningTime="2025-11-22 08:33:21.998085982 +0000 UTC m=+4939.107539995" watchObservedRunningTime="2025-11-22 08:33:22.006897692 +0000 UTC m=+4939.116351715"
Nov 22 08:33:26 crc kubenswrapper[4929]: I1122 08:33:26.596069 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qt2pj"
Nov 22 08:33:26 crc kubenswrapper[4929]: I1122 08:33:26.596350 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qt2pj"
Nov 22 08:33:26 crc kubenswrapper[4929]: I1122 08:33:26.655147 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qt2pj"
Nov 22 08:33:27 crc kubenswrapper[4929]: I1122 08:33:27.366466 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qt2pj"
Nov 22 08:33:27 crc kubenswrapper[4929]: I1122 08:33:27.428586 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qt2pj"]
Need to start a new one" pod="openshift-marketplace/community-operators-qt2pj" Nov 22 08:33:30 crc kubenswrapper[4929]: I1122 08:33:30.667512 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70cd88af-5530-44a1-9169-05792c00706f-catalog-content\") pod \"70cd88af-5530-44a1-9169-05792c00706f\" (UID: \"70cd88af-5530-44a1-9169-05792c00706f\") " Nov 22 08:33:30 crc kubenswrapper[4929]: I1122 08:33:30.667746 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cd4q2\" (UniqueName: \"kubernetes.io/projected/70cd88af-5530-44a1-9169-05792c00706f-kube-api-access-cd4q2\") pod \"70cd88af-5530-44a1-9169-05792c00706f\" (UID: \"70cd88af-5530-44a1-9169-05792c00706f\") " Nov 22 08:33:30 crc kubenswrapper[4929]: I1122 08:33:30.667810 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70cd88af-5530-44a1-9169-05792c00706f-utilities\") pod \"70cd88af-5530-44a1-9169-05792c00706f\" (UID: \"70cd88af-5530-44a1-9169-05792c00706f\") " Nov 22 08:33:30 crc kubenswrapper[4929]: I1122 08:33:30.668820 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70cd88af-5530-44a1-9169-05792c00706f-utilities" (OuterVolumeSpecName: "utilities") pod "70cd88af-5530-44a1-9169-05792c00706f" (UID: "70cd88af-5530-44a1-9169-05792c00706f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:33:30 crc kubenswrapper[4929]: I1122 08:33:30.674066 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70cd88af-5530-44a1-9169-05792c00706f-kube-api-access-cd4q2" (OuterVolumeSpecName: "kube-api-access-cd4q2") pod "70cd88af-5530-44a1-9169-05792c00706f" (UID: "70cd88af-5530-44a1-9169-05792c00706f"). InnerVolumeSpecName "kube-api-access-cd4q2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 08:33:30 crc kubenswrapper[4929]: I1122 08:33:30.770050 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cd4q2\" (UniqueName: \"kubernetes.io/projected/70cd88af-5530-44a1-9169-05792c00706f-kube-api-access-cd4q2\") on node \"crc\" DevicePath \"\"" Nov 22 08:33:30 crc kubenswrapper[4929]: I1122 08:33:30.770393 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70cd88af-5530-44a1-9169-05792c00706f-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 08:33:31 crc kubenswrapper[4929]: I1122 08:33:31.362757 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qt2pj" event={"ID":"70cd88af-5530-44a1-9169-05792c00706f","Type":"ContainerDied","Data":"a072ae6591ca71643fa3c8ce026946568f951e7f80612a59450632c5ca568f95"} Nov 22 08:33:31 crc kubenswrapper[4929]: I1122 08:33:31.362805 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qt2pj" Nov 22 08:33:31 crc kubenswrapper[4929]: I1122 08:33:31.362815 4929 scope.go:117] "RemoveContainer" containerID="4bdc3b1f4e1d7ee4dd87536cc2f33a619d3a915ce7a90612acd90e83618830fe" Nov 22 08:33:31 crc kubenswrapper[4929]: I1122 08:33:31.388583 4929 scope.go:117] "RemoveContainer" containerID="2021662c908a792fef507e8d859b923a49f90718f9e2050477d61b82788bbf98" Nov 22 08:33:31 crc kubenswrapper[4929]: I1122 08:33:31.409895 4929 scope.go:117] "RemoveContainer" containerID="7c0fa7acb5599ff224f33206ce3670ab366b7071a2ad95dbdf134a6c8a94d8a4" Nov 22 08:33:31 crc kubenswrapper[4929]: I1122 08:33:31.467952 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70cd88af-5530-44a1-9169-05792c00706f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "70cd88af-5530-44a1-9169-05792c00706f" (UID: "70cd88af-5530-44a1-9169-05792c00706f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:33:31 crc kubenswrapper[4929]: I1122 08:33:31.486250 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70cd88af-5530-44a1-9169-05792c00706f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 08:33:31 crc kubenswrapper[4929]: I1122 08:33:31.696253 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qt2pj"] Nov 22 08:33:31 crc kubenswrapper[4929]: I1122 08:33:31.706687 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qt2pj"] Nov 22 08:33:31 crc kubenswrapper[4929]: I1122 08:33:31.960847 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70cd88af-5530-44a1-9169-05792c00706f" path="/var/lib/kubelet/pods/70cd88af-5530-44a1-9169-05792c00706f/volumes" Nov 22 08:34:21 crc kubenswrapper[4929]: I1122 08:34:21.452991 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-nmgqd/must-gather-5gcnm"] Nov 22 08:34:21 crc kubenswrapper[4929]: E1122 08:34:21.453978 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70cd88af-5530-44a1-9169-05792c00706f" containerName="extract-content" Nov 22 08:34:21 crc kubenswrapper[4929]: I1122 08:34:21.453996 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="70cd88af-5530-44a1-9169-05792c00706f" containerName="extract-content" Nov 22 08:34:21 crc kubenswrapper[4929]: E1122 08:34:21.454026 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70cd88af-5530-44a1-9169-05792c00706f" containerName="extract-utilities" Nov 22 08:34:21 crc kubenswrapper[4929]: I1122 08:34:21.454032 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="70cd88af-5530-44a1-9169-05792c00706f" containerName="extract-utilities" Nov 22 08:34:21 crc kubenswrapper[4929]: E1122 08:34:21.454050 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70cd88af-5530-44a1-9169-05792c00706f" containerName="registry-server" Nov 22 08:34:21 crc kubenswrapper[4929]: I1122 08:34:21.454059 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="70cd88af-5530-44a1-9169-05792c00706f" containerName="registry-server" Nov 22 08:34:21 crc kubenswrapper[4929]: I1122 08:34:21.454292 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="70cd88af-5530-44a1-9169-05792c00706f" containerName="registry-server" Nov 22 08:34:21 crc kubenswrapper[4929]: I1122 
08:34:21.455356 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-nmgqd/must-gather-5gcnm" Nov 22 08:34:21 crc kubenswrapper[4929]: I1122 08:34:21.458113 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-nmgqd"/"openshift-service-ca.crt" Nov 22 08:34:21 crc kubenswrapper[4929]: I1122 08:34:21.458170 4929 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-nmgqd"/"default-dockercfg-lfssm" Nov 22 08:34:21 crc kubenswrapper[4929]: I1122 08:34:21.458669 4929 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-nmgqd"/"kube-root-ca.crt" Nov 22 08:34:21 crc kubenswrapper[4929]: I1122 08:34:21.488673 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-nmgqd/must-gather-5gcnm"] Nov 22 08:34:21 crc kubenswrapper[4929]: I1122 08:34:21.517851 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7vpw\" (UniqueName: \"kubernetes.io/projected/86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d-kube-api-access-w7vpw\") pod \"must-gather-5gcnm\" (UID: \"86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d\") " pod="openshift-must-gather-nmgqd/must-gather-5gcnm" Nov 22 08:34:21 crc kubenswrapper[4929]: I1122 08:34:21.518089 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d-must-gather-output\") pod \"must-gather-5gcnm\" (UID: \"86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d\") " pod="openshift-must-gather-nmgqd/must-gather-5gcnm" Nov 22 08:34:21 crc kubenswrapper[4929]: I1122 08:34:21.623740 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7vpw\" (UniqueName: \"kubernetes.io/projected/86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d-kube-api-access-w7vpw\") pod \"must-gather-5gcnm\" (UID: \"86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d\") " pod="openshift-must-gather-nmgqd/must-gather-5gcnm" Nov 22 08:34:21 crc kubenswrapper[4929]: I1122 08:34:21.624168 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d-must-gather-output\") pod \"must-gather-5gcnm\" (UID: \"86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d\") " pod="openshift-must-gather-nmgqd/must-gather-5gcnm" Nov 22 08:34:21 crc kubenswrapper[4929]: I1122 08:34:21.624715 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d-must-gather-output\") pod \"must-gather-5gcnm\" (UID: \"86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d\") " pod="openshift-must-gather-nmgqd/must-gather-5gcnm" Nov 22 08:34:21 crc kubenswrapper[4929]: I1122 08:34:21.651096 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7vpw\" (UniqueName: \"kubernetes.io/projected/86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d-kube-api-access-w7vpw\") pod \"must-gather-5gcnm\" (UID: \"86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d\") " pod="openshift-must-gather-nmgqd/must-gather-5gcnm" Nov 22 08:34:21 crc kubenswrapper[4929]: I1122 08:34:21.781981 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-nmgqd/must-gather-5gcnm" Nov 22 08:34:22 crc kubenswrapper[4929]: I1122 08:34:22.449852 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-nmgqd/must-gather-5gcnm"] Nov 22 08:34:22 crc kubenswrapper[4929]: I1122 08:34:22.853773 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nmgqd/must-gather-5gcnm" event={"ID":"86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d","Type":"ContainerStarted","Data":"76bac6405b40381ba5825ad379e2dc6305740a343d05b82b1aeceb44da5c8a92"} Nov 22 08:34:33 crc kubenswrapper[4929]: I1122 08:34:33.962121 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nmgqd/must-gather-5gcnm" event={"ID":"86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d","Type":"ContainerStarted","Data":"9f2494d28834c7bf9b457de3678497fc701a710d0120c161e62a48dc2605ab2f"} Nov 22 08:34:33 crc kubenswrapper[4929]: I1122 08:34:33.962677 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nmgqd/must-gather-5gcnm" event={"ID":"86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d","Type":"ContainerStarted","Data":"f020917a053e1bd139db5d40e2265ae2c4254e6fa91cae222c06fa229beb47d0"} Nov 22 08:34:33 crc kubenswrapper[4929]: I1122 08:34:33.988104 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-nmgqd/must-gather-5gcnm" podStartSLOduration=2.107059924 podStartE2EDuration="12.988078557s" podCreationTimestamp="2025-11-22 08:34:21 +0000 UTC" firstStartedPulling="2025-11-22 08:34:22.460299674 +0000 UTC m=+4999.569753687" lastFinishedPulling="2025-11-22 08:34:33.341318307 +0000 UTC m=+5010.450772320" observedRunningTime="2025-11-22 08:34:33.980491558 +0000 UTC m=+5011.089945581" watchObservedRunningTime="2025-11-22 08:34:33.988078557 +0000 UTC m=+5011.097532570" Nov 22 08:34:41 crc kubenswrapper[4929]: I1122 08:34:41.691510 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-nmgqd/crc-debug-zd58v"] Nov 22 08:34:41 crc kubenswrapper[4929]: I1122 08:34:41.694336 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-nmgqd/crc-debug-zd58v" Nov 22 08:34:41 crc kubenswrapper[4929]: I1122 08:34:41.783142 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9q7f\" (UniqueName: \"kubernetes.io/projected/4f7a09f8-294b-42b8-be08-fb50df64eb25-kube-api-access-g9q7f\") pod \"crc-debug-zd58v\" (UID: \"4f7a09f8-294b-42b8-be08-fb50df64eb25\") " pod="openshift-must-gather-nmgqd/crc-debug-zd58v" Nov 22 08:34:41 crc kubenswrapper[4929]: I1122 08:34:41.783341 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4f7a09f8-294b-42b8-be08-fb50df64eb25-host\") pod \"crc-debug-zd58v\" (UID: \"4f7a09f8-294b-42b8-be08-fb50df64eb25\") " pod="openshift-must-gather-nmgqd/crc-debug-zd58v" Nov 22 08:34:41 crc kubenswrapper[4929]: I1122 08:34:41.885529 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4f7a09f8-294b-42b8-be08-fb50df64eb25-host\") pod \"crc-debug-zd58v\" (UID: \"4f7a09f8-294b-42b8-be08-fb50df64eb25\") " pod="openshift-must-gather-nmgqd/crc-debug-zd58v" Nov 22 08:34:41 crc kubenswrapper[4929]: I1122 08:34:41.885894 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4f7a09f8-294b-42b8-be08-fb50df64eb25-host\") pod \"crc-debug-zd58v\" (UID: \"4f7a09f8-294b-42b8-be08-fb50df64eb25\") " pod="openshift-must-gather-nmgqd/crc-debug-zd58v" Nov 22 08:34:41 crc kubenswrapper[4929]: I1122 08:34:41.886761 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9q7f\" (UniqueName: \"kubernetes.io/projected/4f7a09f8-294b-42b8-be08-fb50df64eb25-kube-api-access-g9q7f\") pod \"crc-debug-zd58v\" (UID: \"4f7a09f8-294b-42b8-be08-fb50df64eb25\") " pod="openshift-must-gather-nmgqd/crc-debug-zd58v" Nov 22 08:34:41 crc kubenswrapper[4929]: I1122 08:34:41.911322 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9q7f\" (UniqueName: \"kubernetes.io/projected/4f7a09f8-294b-42b8-be08-fb50df64eb25-kube-api-access-g9q7f\") pod \"crc-debug-zd58v\" (UID: \"4f7a09f8-294b-42b8-be08-fb50df64eb25\") " pod="openshift-must-gather-nmgqd/crc-debug-zd58v" Nov 22 08:34:42 crc kubenswrapper[4929]: I1122 08:34:42.018863 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-nmgqd/crc-debug-zd58v" Nov 22 08:34:42 crc kubenswrapper[4929]: W1122 08:34:42.067264 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f7a09f8_294b_42b8_be08_fb50df64eb25.slice/crio-f0240c7fafb126356e071d712e70bfeff172a97769e38b4384fd4af6a46e7828 WatchSource:0}: Error finding container f0240c7fafb126356e071d712e70bfeff172a97769e38b4384fd4af6a46e7828: Status 404 returned error can't find the container with id f0240c7fafb126356e071d712e70bfeff172a97769e38b4384fd4af6a46e7828 Nov 22 08:34:43 crc kubenswrapper[4929]: I1122 08:34:43.052682 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nmgqd/crc-debug-zd58v" event={"ID":"4f7a09f8-294b-42b8-be08-fb50df64eb25","Type":"ContainerStarted","Data":"f0240c7fafb126356e071d712e70bfeff172a97769e38b4384fd4af6a46e7828"} Nov 22 08:34:48 crc kubenswrapper[4929]: I1122 08:34:48.594848 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 08:34:48 crc kubenswrapper[4929]: I1122 08:34:48.595726 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 08:35:06 crc kubenswrapper[4929]: E1122 08:35:06.075390 4929 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6ab858aed98e4fe57e6b144da8e90ad5d6698bb4cc5521206f5c05809f0f9296" Nov 22 08:35:06 crc kubenswrapper[4929]: E1122 08:35:06.076503 4929 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:container-00,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6ab858aed98e4fe57e6b144da8e90ad5d6698bb4cc5521206f5c05809f0f9296,Command:[chroot /host bash -c echo 'TOOLBOX_NAME=toolbox-osp' > /root/.toolboxrc ; rm -rf \"/var/tmp/sos-osp\" && mkdir -p \"/var/tmp/sos-osp\" && sudo podman rm --force toolbox-osp; sudo --preserve-env podman pull --authfile /var/lib/kubelet/config.json registry.redhat.io/rhel9/support-tools && toolbox sos report --batch --all-logs --only-plugins block,cifs,crio,devicemapper,devices,firewall_tables,firewalld,iscsi,lvm2,memory,multipath,nfs,nis,nvme,podman,process,processor,selinux,scsi,udev,logs,crypto --tmp-dir=\"/var/tmp/sos-osp\" && if [[ \"$(ls /var/log/pods/*/{*.log.*,*/*.log.*} 2>/dev/null)\" != '' ]]; then tar --ignore-failed-read --warning=no-file-changed -cJf \"/var/tmp/sos-osp/podlogs.tar.xz\" --transform 's,^,podlogs/,' /var/log/pods/*/{*.log.*,*/*.log.*} || true; 
fi],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:TMOUT,Value:900,ValueFrom:nil,},EnvVar{Name:HOST,Value:/host,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:host,ReadOnly:false,MountPath:/host,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g9q7f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod crc-debug-zd58v_openshift-must-gather-nmgqd(4f7a09f8-294b-42b8-be08-fb50df64eb25): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 08:35:06 crc kubenswrapper[4929]: E1122 08:35:06.077754 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"container-00\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openshift-must-gather-nmgqd/crc-debug-zd58v" podUID="4f7a09f8-294b-42b8-be08-fb50df64eb25" Nov 22 08:35:06 crc kubenswrapper[4929]: E1122 08:35:06.323322 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"container-00\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6ab858aed98e4fe57e6b144da8e90ad5d6698bb4cc5521206f5c05809f0f9296\\\"\"" pod="openshift-must-gather-nmgqd/crc-debug-zd58v" podUID="4f7a09f8-294b-42b8-be08-fb50df64eb25" Nov 22 08:35:18 crc kubenswrapper[4929]: I1122 08:35:18.594174 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 08:35:18 crc kubenswrapper[4929]: I1122 08:35:18.596171 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 08:35:23 crc kubenswrapper[4929]: I1122 08:35:23.484832 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nmgqd/crc-debug-zd58v" event={"ID":"4f7a09f8-294b-42b8-be08-fb50df64eb25","Type":"ContainerStarted","Data":"1b7a4e67fd07168f7162b22d96969026d61d93855a9493f23065fb7e5573b00e"} Nov 22 08:35:48 crc kubenswrapper[4929]: I1122 08:35:48.595020 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 08:35:48 crc kubenswrapper[4929]: I1122 08:35:48.596952 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 08:35:48 crc kubenswrapper[4929]: I1122 08:35:48.597012 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 08:35:48 crc kubenswrapper[4929]: I1122 08:35:48.598062 4929 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4ba78b64f79a8e6b71d65feaee6655b928c77e644338723a94ed54163b488f75"} pod="openshift-machine-config-operator/machine-config-daemon-dssfx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 08:35:48 crc kubenswrapper[4929]: I1122 08:35:48.598121 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" containerID="cri-o://4ba78b64f79a8e6b71d65feaee6655b928c77e644338723a94ed54163b488f75" gracePeriod=600 Nov 22 08:35:53 crc kubenswrapper[4929]: I1122 08:35:53.083595 4929 generic.go:334] "Generic (PLEG): container finished" podID="470531cb-120c-48d9-80e1-adf074cf3055" containerID="4ba78b64f79a8e6b71d65feaee6655b928c77e644338723a94ed54163b488f75" exitCode=0 Nov 22 08:35:53 crc kubenswrapper[4929]: I1122 08:35:53.083643 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerDied","Data":"4ba78b64f79a8e6b71d65feaee6655b928c77e644338723a94ed54163b488f75"} Nov 22 08:35:53 crc kubenswrapper[4929]: I1122 08:35:53.085435 4929 scope.go:117] "RemoveContainer" containerID="72dffffb94ab7e9f08296ec1253ecb61da9c12bb296e0a868af12dda8da0ef53" Nov 22 08:35:56 crc kubenswrapper[4929]: I1122 08:35:56.113614 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95"} Nov 22 08:35:56 crc kubenswrapper[4929]: I1122 08:35:56.136384 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-nmgqd/crc-debug-zd58v" podStartSLOduration=34.502586876 podStartE2EDuration="1m15.136359937s" podCreationTimestamp="2025-11-22 08:34:41 +0000 UTC" firstStartedPulling="2025-11-22 08:34:42.072430909 +0000 UTC m=+5019.181884932" lastFinishedPulling="2025-11-22 08:35:22.70620398 +0000 UTC m=+5059.815657993" observedRunningTime="2025-11-22 08:35:23.502908138 +0000 UTC m=+5060.612362161" watchObservedRunningTime="2025-11-22 08:35:56.136359937 +0000 UTC m=+5093.245813940" Nov 22 08:36:15 crc kubenswrapper[4929]: I1122 08:36:15.313752 4929 generic.go:334] "Generic (PLEG): container finished" podID="4f7a09f8-294b-42b8-be08-fb50df64eb25" containerID="1b7a4e67fd07168f7162b22d96969026d61d93855a9493f23065fb7e5573b00e" exitCode=0 Nov 22 08:36:15 crc kubenswrapper[4929]: I1122 08:36:15.314024 4929 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nmgqd/crc-debug-zd58v" event={"ID":"4f7a09f8-294b-42b8-be08-fb50df64eb25","Type":"ContainerDied","Data":"1b7a4e67fd07168f7162b22d96969026d61d93855a9493f23065fb7e5573b00e"} Nov 22 08:36:16 crc kubenswrapper[4929]: I1122 08:36:16.448400 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-nmgqd/crc-debug-zd58v" Nov 22 08:36:16 crc kubenswrapper[4929]: I1122 08:36:16.484181 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-nmgqd/crc-debug-zd58v"] Nov 22 08:36:16 crc kubenswrapper[4929]: I1122 08:36:16.492010 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-nmgqd/crc-debug-zd58v"] Nov 22 08:36:16 crc kubenswrapper[4929]: I1122 08:36:16.525142 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4f7a09f8-294b-42b8-be08-fb50df64eb25-host\") pod \"4f7a09f8-294b-42b8-be08-fb50df64eb25\" (UID: \"4f7a09f8-294b-42b8-be08-fb50df64eb25\") " Nov 22 08:36:16 crc kubenswrapper[4929]: I1122 08:36:16.525478 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9q7f\" (UniqueName: \"kubernetes.io/projected/4f7a09f8-294b-42b8-be08-fb50df64eb25-kube-api-access-g9q7f\") pod \"4f7a09f8-294b-42b8-be08-fb50df64eb25\" (UID: \"4f7a09f8-294b-42b8-be08-fb50df64eb25\") " Nov 22 08:36:16 crc kubenswrapper[4929]: I1122 08:36:16.525419 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4f7a09f8-294b-42b8-be08-fb50df64eb25-host" (OuterVolumeSpecName: "host") pod "4f7a09f8-294b-42b8-be08-fb50df64eb25" (UID: "4f7a09f8-294b-42b8-be08-fb50df64eb25"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 08:36:16 crc kubenswrapper[4929]: I1122 08:36:16.527490 4929 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4f7a09f8-294b-42b8-be08-fb50df64eb25-host\") on node \"crc\" DevicePath \"\"" Nov 22 08:36:16 crc kubenswrapper[4929]: I1122 08:36:16.531700 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f7a09f8-294b-42b8-be08-fb50df64eb25-kube-api-access-g9q7f" (OuterVolumeSpecName: "kube-api-access-g9q7f") pod "4f7a09f8-294b-42b8-be08-fb50df64eb25" (UID: "4f7a09f8-294b-42b8-be08-fb50df64eb25"). InnerVolumeSpecName "kube-api-access-g9q7f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 08:36:16 crc kubenswrapper[4929]: I1122 08:36:16.629748 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9q7f\" (UniqueName: \"kubernetes.io/projected/4f7a09f8-294b-42b8-be08-fb50df64eb25-kube-api-access-g9q7f\") on node \"crc\" DevicePath \"\"" Nov 22 08:36:17 crc kubenswrapper[4929]: I1122 08:36:17.333898 4929 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f0240c7fafb126356e071d712e70bfeff172a97769e38b4384fd4af6a46e7828" Nov 22 08:36:17 crc kubenswrapper[4929]: I1122 08:36:17.333946 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-nmgqd/crc-debug-zd58v" Nov 22 08:36:17 crc kubenswrapper[4929]: I1122 08:36:17.668826 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-nmgqd/crc-debug-k9jmp"] Nov 22 08:36:17 crc kubenswrapper[4929]: E1122 08:36:17.669392 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f7a09f8-294b-42b8-be08-fb50df64eb25" containerName="container-00" Nov 22 08:36:17 crc kubenswrapper[4929]: I1122 08:36:17.669408 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f7a09f8-294b-42b8-be08-fb50df64eb25" containerName="container-00" Nov 22 08:36:17 crc kubenswrapper[4929]: I1122 08:36:17.669691 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f7a09f8-294b-42b8-be08-fb50df64eb25" containerName="container-00" Nov 22 08:36:17 crc kubenswrapper[4929]: I1122 08:36:17.670600 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-nmgqd/crc-debug-k9jmp" Nov 22 08:36:17 crc kubenswrapper[4929]: I1122 08:36:17.756300 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0a0d1b9c-c586-4be6-ba38-6d54f04fba09-host\") pod \"crc-debug-k9jmp\" (UID: \"0a0d1b9c-c586-4be6-ba38-6d54f04fba09\") " pod="openshift-must-gather-nmgqd/crc-debug-k9jmp" Nov 22 08:36:17 crc kubenswrapper[4929]: I1122 08:36:17.757018 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vh7t7\" (UniqueName: \"kubernetes.io/projected/0a0d1b9c-c586-4be6-ba38-6d54f04fba09-kube-api-access-vh7t7\") pod \"crc-debug-k9jmp\" (UID: \"0a0d1b9c-c586-4be6-ba38-6d54f04fba09\") " pod="openshift-must-gather-nmgqd/crc-debug-k9jmp" Nov 22 08:36:17 crc kubenswrapper[4929]: I1122 08:36:17.859748 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0a0d1b9c-c586-4be6-ba38-6d54f04fba09-host\") pod \"crc-debug-k9jmp\" (UID: \"0a0d1b9c-c586-4be6-ba38-6d54f04fba09\") " pod="openshift-must-gather-nmgqd/crc-debug-k9jmp" Nov 22 08:36:17 crc kubenswrapper[4929]: I1122 08:36:17.860076 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vh7t7\" (UniqueName: \"kubernetes.io/projected/0a0d1b9c-c586-4be6-ba38-6d54f04fba09-kube-api-access-vh7t7\") pod \"crc-debug-k9jmp\" (UID: \"0a0d1b9c-c586-4be6-ba38-6d54f04fba09\") " pod="openshift-must-gather-nmgqd/crc-debug-k9jmp" Nov 22 08:36:17 crc kubenswrapper[4929]: I1122 08:36:17.859940 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0a0d1b9c-c586-4be6-ba38-6d54f04fba09-host\") pod \"crc-debug-k9jmp\" (UID: \"0a0d1b9c-c586-4be6-ba38-6d54f04fba09\") " pod="openshift-must-gather-nmgqd/crc-debug-k9jmp" Nov 22 08:36:17 crc kubenswrapper[4929]: I1122 08:36:17.882799 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vh7t7\" (UniqueName: \"kubernetes.io/projected/0a0d1b9c-c586-4be6-ba38-6d54f04fba09-kube-api-access-vh7t7\") pod \"crc-debug-k9jmp\" (UID: \"0a0d1b9c-c586-4be6-ba38-6d54f04fba09\") " pod="openshift-must-gather-nmgqd/crc-debug-k9jmp" Nov 22 08:36:17 crc kubenswrapper[4929]: I1122 08:36:17.959962 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f7a09f8-294b-42b8-be08-fb50df64eb25" 
path="/var/lib/kubelet/pods/4f7a09f8-294b-42b8-be08-fb50df64eb25/volumes" Nov 22 08:36:17 crc kubenswrapper[4929]: I1122 08:36:17.994832 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-nmgqd/crc-debug-k9jmp" Nov 22 08:36:18 crc kubenswrapper[4929]: W1122 08:36:18.029484 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a0d1b9c_c586_4be6_ba38_6d54f04fba09.slice/crio-38f367d6ef5227f3c3f5b2e68f3a87646001a11af96e5fe4ad35aa25beb35b7b WatchSource:0}: Error finding container 38f367d6ef5227f3c3f5b2e68f3a87646001a11af96e5fe4ad35aa25beb35b7b: Status 404 returned error can't find the container with id 38f367d6ef5227f3c3f5b2e68f3a87646001a11af96e5fe4ad35aa25beb35b7b Nov 22 08:36:18 crc kubenswrapper[4929]: I1122 08:36:18.342072 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nmgqd/crc-debug-k9jmp" event={"ID":"0a0d1b9c-c586-4be6-ba38-6d54f04fba09","Type":"ContainerStarted","Data":"38f367d6ef5227f3c3f5b2e68f3a87646001a11af96e5fe4ad35aa25beb35b7b"} Nov 22 08:36:19 crc kubenswrapper[4929]: I1122 08:36:19.354298 4929 generic.go:334] "Generic (PLEG): container finished" podID="0a0d1b9c-c586-4be6-ba38-6d54f04fba09" containerID="22bfe876fd0d199cbc716b2e516063b3a78ec364df2bc027740fe4754069dea2" exitCode=1 Nov 22 08:36:19 crc kubenswrapper[4929]: I1122 08:36:19.354378 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nmgqd/crc-debug-k9jmp" event={"ID":"0a0d1b9c-c586-4be6-ba38-6d54f04fba09","Type":"ContainerDied","Data":"22bfe876fd0d199cbc716b2e516063b3a78ec364df2bc027740fe4754069dea2"} Nov 22 08:36:19 crc kubenswrapper[4929]: I1122 08:36:19.397313 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-nmgqd/crc-debug-k9jmp"] Nov 22 08:36:19 crc kubenswrapper[4929]: I1122 08:36:19.404977 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-nmgqd/crc-debug-k9jmp"] Nov 22 08:36:20 crc kubenswrapper[4929]: I1122 08:36:20.483939 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-nmgqd/crc-debug-k9jmp" Nov 22 08:36:20 crc kubenswrapper[4929]: I1122 08:36:20.621923 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vh7t7\" (UniqueName: \"kubernetes.io/projected/0a0d1b9c-c586-4be6-ba38-6d54f04fba09-kube-api-access-vh7t7\") pod \"0a0d1b9c-c586-4be6-ba38-6d54f04fba09\" (UID: \"0a0d1b9c-c586-4be6-ba38-6d54f04fba09\") " Nov 22 08:36:20 crc kubenswrapper[4929]: I1122 08:36:20.622478 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0a0d1b9c-c586-4be6-ba38-6d54f04fba09-host\") pod \"0a0d1b9c-c586-4be6-ba38-6d54f04fba09\" (UID: \"0a0d1b9c-c586-4be6-ba38-6d54f04fba09\") " Nov 22 08:36:20 crc kubenswrapper[4929]: I1122 08:36:20.623271 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0a0d1b9c-c586-4be6-ba38-6d54f04fba09-host" (OuterVolumeSpecName: "host") pod "0a0d1b9c-c586-4be6-ba38-6d54f04fba09" (UID: "0a0d1b9c-c586-4be6-ba38-6d54f04fba09"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 08:36:20 crc kubenswrapper[4929]: I1122 08:36:20.623440 4929 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0a0d1b9c-c586-4be6-ba38-6d54f04fba09-host\") on node \"crc\" DevicePath \"\"" Nov 22 08:36:20 crc kubenswrapper[4929]: I1122 08:36:20.631118 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a0d1b9c-c586-4be6-ba38-6d54f04fba09-kube-api-access-vh7t7" (OuterVolumeSpecName: "kube-api-access-vh7t7") pod "0a0d1b9c-c586-4be6-ba38-6d54f04fba09" (UID: "0a0d1b9c-c586-4be6-ba38-6d54f04fba09"). InnerVolumeSpecName "kube-api-access-vh7t7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 08:36:20 crc kubenswrapper[4929]: I1122 08:36:20.725243 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vh7t7\" (UniqueName: \"kubernetes.io/projected/0a0d1b9c-c586-4be6-ba38-6d54f04fba09-kube-api-access-vh7t7\") on node \"crc\" DevicePath \"\"" Nov 22 08:36:21 crc kubenswrapper[4929]: I1122 08:36:21.376404 4929 scope.go:117] "RemoveContainer" containerID="22bfe876fd0d199cbc716b2e516063b3a78ec364df2bc027740fe4754069dea2" Nov 22 08:36:21 crc kubenswrapper[4929]: I1122 08:36:21.376469 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-nmgqd/crc-debug-k9jmp" Nov 22 08:36:21 crc kubenswrapper[4929]: I1122 08:36:21.961607 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a0d1b9c-c586-4be6-ba38-6d54f04fba09" path="/var/lib/kubelet/pods/0a0d1b9c-c586-4be6-ba38-6d54f04fba09/volumes" Nov 22 08:36:46 crc kubenswrapper[4929]: I1122 08:36:46.527003 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7486bd8978-4r6hg_edd001b8-6115-447e-bf2e-89d0a843f681/barbican-api/0.log" Nov 22 08:36:46 crc kubenswrapper[4929]: I1122 08:36:46.713905 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7486bd8978-4r6hg_edd001b8-6115-447e-bf2e-89d0a843f681/barbican-api-log/0.log" Nov 22 08:36:46 crc kubenswrapper[4929]: I1122 08:36:46.817420 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6dc6c45b7d-zjztr_cb931a9c-bada-431f-aaae-d3c1603b8d37/barbican-keystone-listener/0.log" Nov 22 08:36:46 crc kubenswrapper[4929]: I1122 08:36:46.908591 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6dc6c45b7d-zjztr_cb931a9c-bada-431f-aaae-d3c1603b8d37/barbican-keystone-listener-log/0.log" Nov 22 08:36:47 crc kubenswrapper[4929]: I1122 08:36:47.042548 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-865648cb87-24j78_8ba5625d-6646-4b1c-aace-040845557c79/barbican-worker/0.log" Nov 22 08:36:47 crc kubenswrapper[4929]: I1122 08:36:47.056270 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-865648cb87-24j78_8ba5625d-6646-4b1c-aace-040845557c79/barbican-worker-log/0.log" Nov 22 08:36:47 crc kubenswrapper[4929]: I1122 08:36:47.250057 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ca38dd37-7bd6-475b-917d-459f11c50877/ceilometer-central-agent/0.log" Nov 22 08:36:47 crc kubenswrapper[4929]: I1122 08:36:47.258748 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ca38dd37-7bd6-475b-917d-459f11c50877/ceilometer-notification-agent/0.log" Nov 22 
08:36:47 crc kubenswrapper[4929]: I1122 08:36:47.284493 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ca38dd37-7bd6-475b-917d-459f11c50877/proxy-httpd/0.log" Nov 22 08:36:47 crc kubenswrapper[4929]: I1122 08:36:47.395272 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ca38dd37-7bd6-475b-917d-459f11c50877/sg-core/0.log" Nov 22 08:36:47 crc kubenswrapper[4929]: I1122 08:36:47.518660 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_c0f7a9d8-548a-4490-9fd9-8c431e6ca06a/cinder-api-log/0.log" Nov 22 08:36:47 crc kubenswrapper[4929]: I1122 08:36:47.520749 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_c0f7a9d8-548a-4490-9fd9-8c431e6ca06a/cinder-api/0.log" Nov 22 08:36:47 crc kubenswrapper[4929]: I1122 08:36:47.780093 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_143bdc31-dc3f-4c3f-81e2-9e4314ba960d/cinder-scheduler/0.log" Nov 22 08:36:47 crc kubenswrapper[4929]: I1122 08:36:47.797221 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_143bdc31-dc3f-4c3f-81e2-9e4314ba960d/probe/0.log" Nov 22 08:36:47 crc kubenswrapper[4929]: I1122 08:36:47.924369 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-89c5cd4d5-w5q2v_df8d4d7c-d69a-487e-87ad-04d59572e2ec/init/0.log" Nov 22 08:36:48 crc kubenswrapper[4929]: I1122 08:36:48.172857 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-89c5cd4d5-w5q2v_df8d4d7c-d69a-487e-87ad-04d59572e2ec/dnsmasq-dns/0.log" Nov 22 08:36:48 crc kubenswrapper[4929]: I1122 08:36:48.176181 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_bae86c1f-5d59-4797-9d7e-f60207880160/glance-httpd/0.log" Nov 22 08:36:48 crc kubenswrapper[4929]: I1122 08:36:48.285607 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-89c5cd4d5-w5q2v_df8d4d7c-d69a-487e-87ad-04d59572e2ec/init/0.log" Nov 22 08:36:48 crc kubenswrapper[4929]: I1122 08:36:48.371978 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_bae86c1f-5d59-4797-9d7e-f60207880160/glance-log/0.log" Nov 22 08:36:48 crc kubenswrapper[4929]: I1122 08:36:48.492892 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_695d5e6c-3514-4a49-bde8-53d4e84a7716/glance-httpd/0.log" Nov 22 08:36:48 crc kubenswrapper[4929]: I1122 08:36:48.502515 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_695d5e6c-3514-4a49-bde8-53d4e84a7716/glance-log/0.log" Nov 22 08:36:48 crc kubenswrapper[4929]: I1122 08:36:48.807411 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-58894b567d-khmvq_d8f14a1c-f6bf-4a66-b839-393c7c34b932/horizon/0.log" Nov 22 08:36:49 crc kubenswrapper[4929]: I1122 08:36:49.037964 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29396641-jdmjw_c9c6000c-4848-4dcb-a01e-2f3a33a1810f/keystone-cron/0.log" Nov 22 08:36:49 crc kubenswrapper[4929]: I1122 08:36:49.074054 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-5fd7f9f4fc-7jzwk_4320c60d-8c47-49d1-9fae-6118bfb228dc/keystone-api/0.log" Nov 22 08:36:49 crc kubenswrapper[4929]: I1122 08:36:49.302888 4929 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_horizon-58894b567d-khmvq_d8f14a1c-f6bf-4a66-b839-393c7c34b932/horizon-log/0.log" Nov 22 08:36:49 crc kubenswrapper[4929]: I1122 08:36:49.321262 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_25fc7418-569c-4102-9186-f21d540d4a67/kube-state-metrics/0.log" Nov 22 08:36:49 crc kubenswrapper[4929]: I1122 08:36:49.571228 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6d5d9c9567-b7l75_b2c46869-321d-4fb4-bc99-a9f61f36a236/neutron-api/0.log" Nov 22 08:36:49 crc kubenswrapper[4929]: I1122 08:36:49.741249 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-6d5d9c9567-b7l75_b2c46869-321d-4fb4-bc99-a9f61f36a236/neutron-httpd/0.log" Nov 22 08:36:50 crc kubenswrapper[4929]: I1122 08:36:50.091486 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_e17c146a-02fe-4dc5-8199-c8c3f4ac12a7/nova-api-log/0.log" Nov 22 08:36:50 crc kubenswrapper[4929]: I1122 08:36:50.203911 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_e17c146a-02fe-4dc5-8199-c8c3f4ac12a7/nova-api-api/0.log" Nov 22 08:36:50 crc kubenswrapper[4929]: I1122 08:36:50.232108 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_c3409b70-c916-47cc-92f5-e005ef66f2b8/nova-cell0-conductor-conductor/0.log" Nov 22 08:36:50 crc kubenswrapper[4929]: I1122 08:36:50.422006 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_58b3b346-fb7a-4957-b9e3-eadbfb64deb6/nova-cell1-conductor-conductor/0.log" Nov 22 08:36:50 crc kubenswrapper[4929]: I1122 08:36:50.620036 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_4a10d2fe-ff47-453d-a731-1b0077d89474/nova-cell1-novncproxy-novncproxy/0.log" Nov 22 08:36:50 crc kubenswrapper[4929]: I1122 08:36:50.950098 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_82e5e3cf-4e6b-4110-bb64-909de233a84a/nova-metadata-log/0.log" Nov 22 08:36:51 crc kubenswrapper[4929]: I1122 08:36:51.226681 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2/mysql-bootstrap/0.log" Nov 22 08:36:51 crc kubenswrapper[4929]: I1122 08:36:51.290750 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_7baf0b74-ace4-4305-b9a4-f9943b7fc888/nova-scheduler-scheduler/0.log" Nov 22 08:36:51 crc kubenswrapper[4929]: I1122 08:36:51.552863 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2/galera/0.log" Nov 22 08:36:51 crc kubenswrapper[4929]: I1122 08:36:51.569315 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_d3cb3949-cd8a-470d-8a5c-3db9bd9bc6a2/mysql-bootstrap/0.log" Nov 22 08:36:51 crc kubenswrapper[4929]: I1122 08:36:51.816657 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_6a23e882-732c-416e-bfc5-c91517389f64/mysql-bootstrap/0.log" Nov 22 08:36:51 crc kubenswrapper[4929]: I1122 08:36:51.982794 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_6a23e882-732c-416e-bfc5-c91517389f64/mysql-bootstrap/0.log" Nov 22 08:36:52 crc kubenswrapper[4929]: I1122 08:36:52.037188 4929 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_openstack-galera-0_6a23e882-732c-416e-bfc5-c91517389f64/galera/0.log" Nov 22 08:36:52 crc kubenswrapper[4929]: I1122 08:36:52.242819 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_72671250-e761-4371-aac4-4789b677d1d7/openstackclient/0.log" Nov 22 08:36:52 crc kubenswrapper[4929]: I1122 08:36:52.451491 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-6p98g_a17264d7-e93b-4448-9ed8-0be507a9120f/ovn-controller/1.log" Nov 22 08:36:52 crc kubenswrapper[4929]: I1122 08:36:52.554858 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_82e5e3cf-4e6b-4110-bb64-909de233a84a/nova-metadata-metadata/0.log" Nov 22 08:36:52 crc kubenswrapper[4929]: I1122 08:36:52.556412 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-6p98g_a17264d7-e93b-4448-9ed8-0be507a9120f/ovn-controller/0.log" Nov 22 08:36:52 crc kubenswrapper[4929]: I1122 08:36:52.762532 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-p8r4f_06fbe85e-ec6c-4a26-bbb2-3ec2e8bcd380/openstack-network-exporter/0.log" Nov 22 08:36:52 crc kubenswrapper[4929]: I1122 08:36:52.800781 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-gglj6_001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8/ovsdb-server-init/0.log" Nov 22 08:36:53 crc kubenswrapper[4929]: I1122 08:36:53.105664 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-gglj6_001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8/ovsdb-server-init/0.log" Nov 22 08:36:53 crc kubenswrapper[4929]: I1122 08:36:53.120323 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-gglj6_001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8/ovsdb-server/0.log" Nov 22 08:36:53 crc kubenswrapper[4929]: I1122 08:36:53.143132 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-gglj6_001fa5a3-fac0-4b3f-99dc-bd910c8e2bb8/ovs-vswitchd/0.log" Nov 22 08:36:53 crc kubenswrapper[4929]: I1122 08:36:53.373135 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_2d73bc22-a412-491f-9484-71864027c02f/ovn-northd/0.log" Nov 22 08:36:53 crc kubenswrapper[4929]: I1122 08:36:53.404926 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_2d73bc22-a412-491f-9484-71864027c02f/openstack-network-exporter/0.log" Nov 22 08:36:53 crc kubenswrapper[4929]: I1122 08:36:53.424444 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_2886f6ad-e9ae-48fa-b9eb-9688a1022f29/openstack-network-exporter/0.log" Nov 22 08:36:53 crc kubenswrapper[4929]: I1122 08:36:53.617429 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_2886f6ad-e9ae-48fa-b9eb-9688a1022f29/ovsdbserver-nb/0.log" Nov 22 08:36:53 crc kubenswrapper[4929]: I1122 08:36:53.650693 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d/openstack-network-exporter/0.log" Nov 22 08:36:53 crc kubenswrapper[4929]: I1122 08:36:53.767449 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_4dfcc2c0-f2e1-4c0a-9bf4-f0b56f89214d/ovsdbserver-sb/0.log" Nov 22 08:36:54 crc kubenswrapper[4929]: I1122 08:36:54.003991 4929 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_placement-5bcb6df596-xswbt_28cf3d66-0622-409a-b63b-212351f3827d/placement-api/0.log" Nov 22 08:36:54 crc kubenswrapper[4929]: I1122 08:36:54.037624 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5bcb6df596-xswbt_28cf3d66-0622-409a-b63b-212351f3827d/placement-log/0.log" Nov 22 08:36:54 crc kubenswrapper[4929]: I1122 08:36:54.136515 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4/init-config-reloader/0.log" Nov 22 08:36:54 crc kubenswrapper[4929]: I1122 08:36:54.414656 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4/init-config-reloader/0.log" Nov 22 08:36:54 crc kubenswrapper[4929]: I1122 08:36:54.421805 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4/config-reloader/0.log" Nov 22 08:36:54 crc kubenswrapper[4929]: I1122 08:36:54.467398 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4/thanos-sidecar/0.log" Nov 22 08:36:54 crc kubenswrapper[4929]: I1122 08:36:54.482152 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_fc7d5e3a-98f9-4a7d-a312-69d04dd5bcd4/prometheus/0.log" Nov 22 08:36:55 crc kubenswrapper[4929]: I1122 08:36:55.187606 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_941ef061-1085-45ca-84e2-60447bb10c47/setup-container/0.log" Nov 22 08:36:55 crc kubenswrapper[4929]: I1122 08:36:55.411027 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_941ef061-1085-45ca-84e2-60447bb10c47/setup-container/0.log" Nov 22 08:36:55 crc kubenswrapper[4929]: I1122 08:36:55.418298 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_941ef061-1085-45ca-84e2-60447bb10c47/rabbitmq/0.log" Nov 22 08:36:55 crc kubenswrapper[4929]: I1122 08:36:55.539702 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_76073571-729d-4de5-bda6-780d28ae6a9b/setup-container/0.log" Nov 22 08:36:55 crc kubenswrapper[4929]: I1122 08:36:55.768950 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_76073571-729d-4de5-bda6-780d28ae6a9b/setup-container/0.log" Nov 22 08:36:55 crc kubenswrapper[4929]: I1122 08:36:55.779619 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_76073571-729d-4de5-bda6-780d28ae6a9b/rabbitmq/0.log" Nov 22 08:36:55 crc kubenswrapper[4929]: I1122 08:36:55.988565 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5b65879b9f-vjcj8_3971fe14-e65d-4a66-a0ce-d004f1f4e0f1/proxy-httpd/0.log" Nov 22 08:36:56 crc kubenswrapper[4929]: I1122 08:36:56.175963 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-bbxj7_4c640fe8-4583-4162-949d-4508edaca274/swift-ring-rebalance/0.log" Nov 22 08:36:56 crc kubenswrapper[4929]: I1122 08:36:56.182028 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-5b65879b9f-vjcj8_3971fe14-e65d-4a66-a0ce-d004f1f4e0f1/proxy-server/0.log" Nov 22 08:36:56 crc kubenswrapper[4929]: I1122 08:36:56.292489 4929 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_dab37299-3b8e-46d0-b6a5-044f7d4878d6/account-auditor/0.log" Nov 22 08:36:56 crc kubenswrapper[4929]: I1122 08:36:56.443413 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dab37299-3b8e-46d0-b6a5-044f7d4878d6/account-reaper/0.log" Nov 22 08:36:56 crc kubenswrapper[4929]: I1122 08:36:56.509696 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dab37299-3b8e-46d0-b6a5-044f7d4878d6/account-server/0.log" Nov 22 08:36:56 crc kubenswrapper[4929]: I1122 08:36:56.546099 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dab37299-3b8e-46d0-b6a5-044f7d4878d6/container-auditor/0.log" Nov 22 08:36:56 crc kubenswrapper[4929]: I1122 08:36:56.566456 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dab37299-3b8e-46d0-b6a5-044f7d4878d6/account-replicator/0.log" Nov 22 08:36:57 crc kubenswrapper[4929]: I1122 08:36:57.371909 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dab37299-3b8e-46d0-b6a5-044f7d4878d6/container-server/0.log" Nov 22 08:36:57 crc kubenswrapper[4929]: I1122 08:36:57.392865 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dab37299-3b8e-46d0-b6a5-044f7d4878d6/container-updater/0.log" Nov 22 08:36:57 crc kubenswrapper[4929]: I1122 08:36:57.416834 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dab37299-3b8e-46d0-b6a5-044f7d4878d6/container-replicator/0.log" Nov 22 08:36:57 crc kubenswrapper[4929]: I1122 08:36:57.428926 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dab37299-3b8e-46d0-b6a5-044f7d4878d6/object-auditor/0.log" Nov 22 08:36:57 crc kubenswrapper[4929]: I1122 08:36:57.595843 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dab37299-3b8e-46d0-b6a5-044f7d4878d6/object-expirer/0.log" Nov 22 08:36:57 crc kubenswrapper[4929]: I1122 08:36:57.651308 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dab37299-3b8e-46d0-b6a5-044f7d4878d6/object-server/0.log" Nov 22 08:36:57 crc kubenswrapper[4929]: I1122 08:36:57.683300 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dab37299-3b8e-46d0-b6a5-044f7d4878d6/object-replicator/0.log" Nov 22 08:36:57 crc kubenswrapper[4929]: I1122 08:36:57.716132 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dab37299-3b8e-46d0-b6a5-044f7d4878d6/object-updater/0.log" Nov 22 08:36:57 crc kubenswrapper[4929]: I1122 08:36:57.841455 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dab37299-3b8e-46d0-b6a5-044f7d4878d6/rsync/0.log" Nov 22 08:36:57 crc kubenswrapper[4929]: I1122 08:36:57.864921 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_dab37299-3b8e-46d0-b6a5-044f7d4878d6/swift-recon-cron/0.log" Nov 22 08:36:58 crc kubenswrapper[4929]: I1122 08:36:58.180647 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_5143f7c3-b064-4290-96a9-fb164c215793/watcher-api-log/0.log" Nov 22 08:36:58 crc kubenswrapper[4929]: I1122 08:36:58.429259 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-applier-0_b26c616d-54c4-4a6c-aef6-4f17f7442138/watcher-applier/0.log" Nov 22 08:36:58 crc kubenswrapper[4929]: 
I1122 08:36:58.755824 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-decision-engine-0_f3dae3e9-bb7e-42e8-801e-b313a6385954/watcher-decision-engine/0.log" Nov 22 08:36:59 crc kubenswrapper[4929]: I1122 08:36:59.457922 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9jn7k"] Nov 22 08:36:59 crc kubenswrapper[4929]: E1122 08:36:59.459058 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a0d1b9c-c586-4be6-ba38-6d54f04fba09" containerName="container-00" Nov 22 08:36:59 crc kubenswrapper[4929]: I1122 08:36:59.459085 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a0d1b9c-c586-4be6-ba38-6d54f04fba09" containerName="container-00" Nov 22 08:36:59 crc kubenswrapper[4929]: I1122 08:36:59.459515 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a0d1b9c-c586-4be6-ba38-6d54f04fba09" containerName="container-00" Nov 22 08:36:59 crc kubenswrapper[4929]: I1122 08:36:59.461593 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9jn7k" Nov 22 08:36:59 crc kubenswrapper[4929]: I1122 08:36:59.471133 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9jn7k"] Nov 22 08:36:59 crc kubenswrapper[4929]: I1122 08:36:59.596588 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvfwq\" (UniqueName: \"kubernetes.io/projected/aa615d23-6b44-4eec-826c-415504f99413-kube-api-access-rvfwq\") pod \"certified-operators-9jn7k\" (UID: \"aa615d23-6b44-4eec-826c-415504f99413\") " pod="openshift-marketplace/certified-operators-9jn7k" Nov 22 08:36:59 crc kubenswrapper[4929]: I1122 08:36:59.596708 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa615d23-6b44-4eec-826c-415504f99413-utilities\") pod \"certified-operators-9jn7k\" (UID: \"aa615d23-6b44-4eec-826c-415504f99413\") " pod="openshift-marketplace/certified-operators-9jn7k" Nov 22 08:36:59 crc kubenswrapper[4929]: I1122 08:36:59.596793 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa615d23-6b44-4eec-826c-415504f99413-catalog-content\") pod \"certified-operators-9jn7k\" (UID: \"aa615d23-6b44-4eec-826c-415504f99413\") " pod="openshift-marketplace/certified-operators-9jn7k" Nov 22 08:36:59 crc kubenswrapper[4929]: I1122 08:36:59.698395 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa615d23-6b44-4eec-826c-415504f99413-utilities\") pod \"certified-operators-9jn7k\" (UID: \"aa615d23-6b44-4eec-826c-415504f99413\") " pod="openshift-marketplace/certified-operators-9jn7k" Nov 22 08:36:59 crc kubenswrapper[4929]: I1122 08:36:59.698479 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa615d23-6b44-4eec-826c-415504f99413-catalog-content\") pod \"certified-operators-9jn7k\" (UID: \"aa615d23-6b44-4eec-826c-415504f99413\") " pod="openshift-marketplace/certified-operators-9jn7k" Nov 22 08:36:59 crc kubenswrapper[4929]: I1122 08:36:59.698563 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvfwq\" (UniqueName: 
\"kubernetes.io/projected/aa615d23-6b44-4eec-826c-415504f99413-kube-api-access-rvfwq\") pod \"certified-operators-9jn7k\" (UID: \"aa615d23-6b44-4eec-826c-415504f99413\") " pod="openshift-marketplace/certified-operators-9jn7k" Nov 22 08:36:59 crc kubenswrapper[4929]: I1122 08:36:59.699275 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa615d23-6b44-4eec-826c-415504f99413-utilities\") pod \"certified-operators-9jn7k\" (UID: \"aa615d23-6b44-4eec-826c-415504f99413\") " pod="openshift-marketplace/certified-operators-9jn7k" Nov 22 08:36:59 crc kubenswrapper[4929]: I1122 08:36:59.699502 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa615d23-6b44-4eec-826c-415504f99413-catalog-content\") pod \"certified-operators-9jn7k\" (UID: \"aa615d23-6b44-4eec-826c-415504f99413\") " pod="openshift-marketplace/certified-operators-9jn7k" Nov 22 08:36:59 crc kubenswrapper[4929]: I1122 08:36:59.729040 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvfwq\" (UniqueName: \"kubernetes.io/projected/aa615d23-6b44-4eec-826c-415504f99413-kube-api-access-rvfwq\") pod \"certified-operators-9jn7k\" (UID: \"aa615d23-6b44-4eec-826c-415504f99413\") " pod="openshift-marketplace/certified-operators-9jn7k" Nov 22 08:36:59 crc kubenswrapper[4929]: I1122 08:36:59.801987 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9jn7k" Nov 22 08:37:00 crc kubenswrapper[4929]: I1122 08:37:00.548753 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9jn7k"] Nov 22 08:37:00 crc kubenswrapper[4929]: I1122 08:37:00.928458 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_5143f7c3-b064-4290-96a9-fb164c215793/watcher-api/0.log" Nov 22 08:37:01 crc kubenswrapper[4929]: I1122 08:37:01.515010 4929 generic.go:334] "Generic (PLEG): container finished" podID="aa615d23-6b44-4eec-826c-415504f99413" containerID="f5cac1d3f13e6b2eb03f29d08eb52b00a4c6dc988ba13a6cb3aa461d4f59ec1d" exitCode=0 Nov 22 08:37:01 crc kubenswrapper[4929]: I1122 08:37:01.515062 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9jn7k" event={"ID":"aa615d23-6b44-4eec-826c-415504f99413","Type":"ContainerDied","Data":"f5cac1d3f13e6b2eb03f29d08eb52b00a4c6dc988ba13a6cb3aa461d4f59ec1d"} Nov 22 08:37:01 crc kubenswrapper[4929]: I1122 08:37:01.515093 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9jn7k" event={"ID":"aa615d23-6b44-4eec-826c-415504f99413","Type":"ContainerStarted","Data":"c25c73a25e5d1611bb7a9ec845633de93807acaa969d83681512bc32c0da7b28"} Nov 22 08:37:03 crc kubenswrapper[4929]: I1122 08:37:03.539288 4929 generic.go:334] "Generic (PLEG): container finished" podID="aa615d23-6b44-4eec-826c-415504f99413" containerID="d72b362f8154d31676179328346a31a4452b0c3c1d75a3c7475d3f7841e4173e" exitCode=0 Nov 22 08:37:03 crc kubenswrapper[4929]: I1122 08:37:03.539966 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9jn7k" event={"ID":"aa615d23-6b44-4eec-826c-415504f99413","Type":"ContainerDied","Data":"d72b362f8154d31676179328346a31a4452b0c3c1d75a3c7475d3f7841e4173e"} Nov 22 08:37:05 crc kubenswrapper[4929]: I1122 08:37:05.588585 4929 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-marketplace/certified-operators-9jn7k" event={"ID":"aa615d23-6b44-4eec-826c-415504f99413","Type":"ContainerStarted","Data":"c64fc5cc22461a9c6f76fde52da8a2afdbfb03ab12f3a7867bade642e9646fa8"} Nov 22 08:37:05 crc kubenswrapper[4929]: I1122 08:37:05.624506 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9jn7k" podStartSLOduration=3.56906654 podStartE2EDuration="6.624481326s" podCreationTimestamp="2025-11-22 08:36:59 +0000 UTC" firstStartedPulling="2025-11-22 08:37:01.516976442 +0000 UTC m=+5158.626430455" lastFinishedPulling="2025-11-22 08:37:04.572391228 +0000 UTC m=+5161.681845241" observedRunningTime="2025-11-22 08:37:05.612891006 +0000 UTC m=+5162.722345019" watchObservedRunningTime="2025-11-22 08:37:05.624481326 +0000 UTC m=+5162.733935339" Nov 22 08:37:09 crc kubenswrapper[4929]: I1122 08:37:09.186849 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_619c7624-23e8-4578-8361-b9c91a56e5c9/memcached/0.log" Nov 22 08:37:09 crc kubenswrapper[4929]: I1122 08:37:09.803282 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9jn7k" Nov 22 08:37:09 crc kubenswrapper[4929]: I1122 08:37:09.803355 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9jn7k" Nov 22 08:37:09 crc kubenswrapper[4929]: I1122 08:37:09.874329 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9jn7k" Nov 22 08:37:10 crc kubenswrapper[4929]: I1122 08:37:10.065717 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-r4p65"] Nov 22 08:37:10 crc kubenswrapper[4929]: I1122 08:37:10.083834 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r4p65" Nov 22 08:37:10 crc kubenswrapper[4929]: I1122 08:37:10.149403 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-r4p65"] Nov 22 08:37:10 crc kubenswrapper[4929]: I1122 08:37:10.177677 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36faef4c-842e-42af-8dc6-6999fda45e60-catalog-content\") pod \"redhat-marketplace-r4p65\" (UID: \"36faef4c-842e-42af-8dc6-6999fda45e60\") " pod="openshift-marketplace/redhat-marketplace-r4p65" Nov 22 08:37:10 crc kubenswrapper[4929]: I1122 08:37:10.177804 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36faef4c-842e-42af-8dc6-6999fda45e60-utilities\") pod \"redhat-marketplace-r4p65\" (UID: \"36faef4c-842e-42af-8dc6-6999fda45e60\") " pod="openshift-marketplace/redhat-marketplace-r4p65" Nov 22 08:37:10 crc kubenswrapper[4929]: I1122 08:37:10.177860 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbkjl\" (UniqueName: \"kubernetes.io/projected/36faef4c-842e-42af-8dc6-6999fda45e60-kube-api-access-zbkjl\") pod \"redhat-marketplace-r4p65\" (UID: \"36faef4c-842e-42af-8dc6-6999fda45e60\") " pod="openshift-marketplace/redhat-marketplace-r4p65" Nov 22 08:37:10 crc kubenswrapper[4929]: I1122 08:37:10.280090 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbkjl\" (UniqueName: \"kubernetes.io/projected/36faef4c-842e-42af-8dc6-6999fda45e60-kube-api-access-zbkjl\") pod \"redhat-marketplace-r4p65\" (UID: \"36faef4c-842e-42af-8dc6-6999fda45e60\") " pod="openshift-marketplace/redhat-marketplace-r4p65" Nov 22 08:37:10 crc kubenswrapper[4929]: I1122 08:37:10.280372 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36faef4c-842e-42af-8dc6-6999fda45e60-catalog-content\") pod \"redhat-marketplace-r4p65\" (UID: \"36faef4c-842e-42af-8dc6-6999fda45e60\") " pod="openshift-marketplace/redhat-marketplace-r4p65" Nov 22 08:37:10 crc kubenswrapper[4929]: I1122 08:37:10.280495 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36faef4c-842e-42af-8dc6-6999fda45e60-utilities\") pod \"redhat-marketplace-r4p65\" (UID: \"36faef4c-842e-42af-8dc6-6999fda45e60\") " pod="openshift-marketplace/redhat-marketplace-r4p65" Nov 22 08:37:10 crc kubenswrapper[4929]: I1122 08:37:10.280916 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36faef4c-842e-42af-8dc6-6999fda45e60-catalog-content\") pod \"redhat-marketplace-r4p65\" (UID: \"36faef4c-842e-42af-8dc6-6999fda45e60\") " pod="openshift-marketplace/redhat-marketplace-r4p65" Nov 22 08:37:10 crc kubenswrapper[4929]: I1122 08:37:10.281046 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36faef4c-842e-42af-8dc6-6999fda45e60-utilities\") pod \"redhat-marketplace-r4p65\" (UID: \"36faef4c-842e-42af-8dc6-6999fda45e60\") " pod="openshift-marketplace/redhat-marketplace-r4p65" Nov 22 08:37:10 crc kubenswrapper[4929]: I1122 08:37:10.301282 4929 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-zbkjl\" (UniqueName: \"kubernetes.io/projected/36faef4c-842e-42af-8dc6-6999fda45e60-kube-api-access-zbkjl\") pod \"redhat-marketplace-r4p65\" (UID: \"36faef4c-842e-42af-8dc6-6999fda45e60\") " pod="openshift-marketplace/redhat-marketplace-r4p65" Nov 22 08:37:10 crc kubenswrapper[4929]: I1122 08:37:10.442660 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r4p65" Nov 22 08:37:10 crc kubenswrapper[4929]: I1122 08:37:10.715692 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9jn7k" Nov 22 08:37:11 crc kubenswrapper[4929]: I1122 08:37:11.005329 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-r4p65"] Nov 22 08:37:11 crc kubenswrapper[4929]: I1122 08:37:11.651171 4929 generic.go:334] "Generic (PLEG): container finished" podID="36faef4c-842e-42af-8dc6-6999fda45e60" containerID="5c81e917a814060637ef31f1dd68a5a12624d33812e26357d826a2873591f710" exitCode=0 Nov 22 08:37:11 crc kubenswrapper[4929]: I1122 08:37:11.651318 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r4p65" event={"ID":"36faef4c-842e-42af-8dc6-6999fda45e60","Type":"ContainerDied","Data":"5c81e917a814060637ef31f1dd68a5a12624d33812e26357d826a2873591f710"} Nov 22 08:37:11 crc kubenswrapper[4929]: I1122 08:37:11.651614 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r4p65" event={"ID":"36faef4c-842e-42af-8dc6-6999fda45e60","Type":"ContainerStarted","Data":"2c8129e905b84c536ac4e8445578eb700ad03f17218083eed26c7a075d2c290c"} Nov 22 08:37:13 crc kubenswrapper[4929]: I1122 08:37:13.672638 4929 generic.go:334] "Generic (PLEG): container finished" podID="36faef4c-842e-42af-8dc6-6999fda45e60" containerID="95fa6a10794c5000738d50ae42f496d00f8ce2c4d05d54212ec69fa89b22e418" exitCode=0 Nov 22 08:37:13 crc kubenswrapper[4929]: I1122 08:37:13.672808 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r4p65" event={"ID":"36faef4c-842e-42af-8dc6-6999fda45e60","Type":"ContainerDied","Data":"95fa6a10794c5000738d50ae42f496d00f8ce2c4d05d54212ec69fa89b22e418"} Nov 22 08:37:14 crc kubenswrapper[4929]: I1122 08:37:14.270406 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fzbmv"] Nov 22 08:37:14 crc kubenswrapper[4929]: I1122 08:37:14.274023 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-fzbmv" Nov 22 08:37:14 crc kubenswrapper[4929]: I1122 08:37:14.290967 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fzbmv"] Nov 22 08:37:14 crc kubenswrapper[4929]: I1122 08:37:14.374979 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7407e2e2-93e4-4598-8c40-77086e07bf74-utilities\") pod \"redhat-operators-fzbmv\" (UID: \"7407e2e2-93e4-4598-8c40-77086e07bf74\") " pod="openshift-marketplace/redhat-operators-fzbmv" Nov 22 08:37:14 crc kubenswrapper[4929]: I1122 08:37:14.375146 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7407e2e2-93e4-4598-8c40-77086e07bf74-catalog-content\") pod \"redhat-operators-fzbmv\" (UID: \"7407e2e2-93e4-4598-8c40-77086e07bf74\") " pod="openshift-marketplace/redhat-operators-fzbmv" Nov 22 08:37:14 crc kubenswrapper[4929]: I1122 08:37:14.375403 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2pjx\" (UniqueName: \"kubernetes.io/projected/7407e2e2-93e4-4598-8c40-77086e07bf74-kube-api-access-k2pjx\") pod \"redhat-operators-fzbmv\" (UID: \"7407e2e2-93e4-4598-8c40-77086e07bf74\") " pod="openshift-marketplace/redhat-operators-fzbmv" Nov 22 08:37:14 crc kubenswrapper[4929]: I1122 08:37:14.477867 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7407e2e2-93e4-4598-8c40-77086e07bf74-utilities\") pod \"redhat-operators-fzbmv\" (UID: \"7407e2e2-93e4-4598-8c40-77086e07bf74\") " pod="openshift-marketplace/redhat-operators-fzbmv" Nov 22 08:37:14 crc kubenswrapper[4929]: I1122 08:37:14.477958 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7407e2e2-93e4-4598-8c40-77086e07bf74-catalog-content\") pod \"redhat-operators-fzbmv\" (UID: \"7407e2e2-93e4-4598-8c40-77086e07bf74\") " pod="openshift-marketplace/redhat-operators-fzbmv" Nov 22 08:37:14 crc kubenswrapper[4929]: I1122 08:37:14.478041 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2pjx\" (UniqueName: \"kubernetes.io/projected/7407e2e2-93e4-4598-8c40-77086e07bf74-kube-api-access-k2pjx\") pod \"redhat-operators-fzbmv\" (UID: \"7407e2e2-93e4-4598-8c40-77086e07bf74\") " pod="openshift-marketplace/redhat-operators-fzbmv" Nov 22 08:37:14 crc kubenswrapper[4929]: I1122 08:37:14.478507 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7407e2e2-93e4-4598-8c40-77086e07bf74-utilities\") pod \"redhat-operators-fzbmv\" (UID: \"7407e2e2-93e4-4598-8c40-77086e07bf74\") " pod="openshift-marketplace/redhat-operators-fzbmv" Nov 22 08:37:14 crc kubenswrapper[4929]: I1122 08:37:14.478598 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7407e2e2-93e4-4598-8c40-77086e07bf74-catalog-content\") pod \"redhat-operators-fzbmv\" (UID: \"7407e2e2-93e4-4598-8c40-77086e07bf74\") " pod="openshift-marketplace/redhat-operators-fzbmv" Nov 22 08:37:14 crc kubenswrapper[4929]: I1122 08:37:14.499184 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-k2pjx\" (UniqueName: \"kubernetes.io/projected/7407e2e2-93e4-4598-8c40-77086e07bf74-kube-api-access-k2pjx\") pod \"redhat-operators-fzbmv\" (UID: \"7407e2e2-93e4-4598-8c40-77086e07bf74\") " pod="openshift-marketplace/redhat-operators-fzbmv" Nov 22 08:37:14 crc kubenswrapper[4929]: I1122 08:37:14.601181 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fzbmv" Nov 22 08:37:15 crc kubenswrapper[4929]: I1122 08:37:15.220801 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fzbmv"] Nov 22 08:37:15 crc kubenswrapper[4929]: W1122 08:37:15.233129 4929 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7407e2e2_93e4_4598_8c40_77086e07bf74.slice/crio-c738f2788bbe0698a2276628f98f23a3c19e1b59d09fb00eefe703a5f891dc66 WatchSource:0}: Error finding container c738f2788bbe0698a2276628f98f23a3c19e1b59d09fb00eefe703a5f891dc66: Status 404 returned error can't find the container with id c738f2788bbe0698a2276628f98f23a3c19e1b59d09fb00eefe703a5f891dc66 Nov 22 08:37:15 crc kubenswrapper[4929]: I1122 08:37:15.701838 4929 generic.go:334] "Generic (PLEG): container finished" podID="7407e2e2-93e4-4598-8c40-77086e07bf74" containerID="c1de85bd6398974d55ce468ce1f2e3c1d95c54d88d2952d4d849758db3cb9923" exitCode=0 Nov 22 08:37:15 crc kubenswrapper[4929]: I1122 08:37:15.701912 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzbmv" event={"ID":"7407e2e2-93e4-4598-8c40-77086e07bf74","Type":"ContainerDied","Data":"c1de85bd6398974d55ce468ce1f2e3c1d95c54d88d2952d4d849758db3cb9923"} Nov 22 08:37:15 crc kubenswrapper[4929]: I1122 08:37:15.702280 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzbmv" event={"ID":"7407e2e2-93e4-4598-8c40-77086e07bf74","Type":"ContainerStarted","Data":"c738f2788bbe0698a2276628f98f23a3c19e1b59d09fb00eefe703a5f891dc66"} Nov 22 08:37:16 crc kubenswrapper[4929]: I1122 08:37:16.455306 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9jn7k"] Nov 22 08:37:16 crc kubenswrapper[4929]: I1122 08:37:16.455844 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9jn7k" podUID="aa615d23-6b44-4eec-826c-415504f99413" containerName="registry-server" containerID="cri-o://c64fc5cc22461a9c6f76fde52da8a2afdbfb03ab12f3a7867bade642e9646fa8" gracePeriod=2 Nov 22 08:37:16 crc kubenswrapper[4929]: I1122 08:37:16.716124 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r4p65" event={"ID":"36faef4c-842e-42af-8dc6-6999fda45e60","Type":"ContainerStarted","Data":"13f534c5fe10e6916abde065be33a7f5c03a7b90a2b99aa66292028bd48bdf3e"} Nov 22 08:37:16 crc kubenswrapper[4929]: I1122 08:37:16.718873 4929 generic.go:334] "Generic (PLEG): container finished" podID="aa615d23-6b44-4eec-826c-415504f99413" containerID="c64fc5cc22461a9c6f76fde52da8a2afdbfb03ab12f3a7867bade642e9646fa8" exitCode=0 Nov 22 08:37:16 crc kubenswrapper[4929]: I1122 08:37:16.718921 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9jn7k" event={"ID":"aa615d23-6b44-4eec-826c-415504f99413","Type":"ContainerDied","Data":"c64fc5cc22461a9c6f76fde52da8a2afdbfb03ab12f3a7867bade642e9646fa8"} Nov 22 08:37:16 crc kubenswrapper[4929]: I1122 
08:37:16.746758 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-r4p65" podStartSLOduration=2.582598609 podStartE2EDuration="6.746737487s" podCreationTimestamp="2025-11-22 08:37:10 +0000 UTC" firstStartedPulling="2025-11-22 08:37:11.653440312 +0000 UTC m=+5168.762894325" lastFinishedPulling="2025-11-22 08:37:15.81757919 +0000 UTC m=+5172.927033203" observedRunningTime="2025-11-22 08:37:16.737388213 +0000 UTC m=+5173.846842236" watchObservedRunningTime="2025-11-22 08:37:16.746737487 +0000 UTC m=+5173.856191500" Nov 22 08:37:17 crc kubenswrapper[4929]: I1122 08:37:17.605689 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9jn7k" Nov 22 08:37:17 crc kubenswrapper[4929]: I1122 08:37:17.658035 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa615d23-6b44-4eec-826c-415504f99413-catalog-content\") pod \"aa615d23-6b44-4eec-826c-415504f99413\" (UID: \"aa615d23-6b44-4eec-826c-415504f99413\") " Nov 22 08:37:17 crc kubenswrapper[4929]: I1122 08:37:17.666527 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa615d23-6b44-4eec-826c-415504f99413-utilities\") pod \"aa615d23-6b44-4eec-826c-415504f99413\" (UID: \"aa615d23-6b44-4eec-826c-415504f99413\") " Nov 22 08:37:17 crc kubenswrapper[4929]: I1122 08:37:17.666871 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvfwq\" (UniqueName: \"kubernetes.io/projected/aa615d23-6b44-4eec-826c-415504f99413-kube-api-access-rvfwq\") pod \"aa615d23-6b44-4eec-826c-415504f99413\" (UID: \"aa615d23-6b44-4eec-826c-415504f99413\") " Nov 22 08:37:17 crc kubenswrapper[4929]: I1122 08:37:17.669233 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa615d23-6b44-4eec-826c-415504f99413-utilities" (OuterVolumeSpecName: "utilities") pod "aa615d23-6b44-4eec-826c-415504f99413" (UID: "aa615d23-6b44-4eec-826c-415504f99413"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:37:17 crc kubenswrapper[4929]: I1122 08:37:17.687746 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa615d23-6b44-4eec-826c-415504f99413-kube-api-access-rvfwq" (OuterVolumeSpecName: "kube-api-access-rvfwq") pod "aa615d23-6b44-4eec-826c-415504f99413" (UID: "aa615d23-6b44-4eec-826c-415504f99413"). InnerVolumeSpecName "kube-api-access-rvfwq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 08:37:17 crc kubenswrapper[4929]: I1122 08:37:17.712339 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa615d23-6b44-4eec-826c-415504f99413-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "aa615d23-6b44-4eec-826c-415504f99413" (UID: "aa615d23-6b44-4eec-826c-415504f99413"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:37:17 crc kubenswrapper[4929]: I1122 08:37:17.746032 4929 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9jn7k" Nov 22 08:37:17 crc kubenswrapper[4929]: I1122 08:37:17.746043 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9jn7k" event={"ID":"aa615d23-6b44-4eec-826c-415504f99413","Type":"ContainerDied","Data":"c25c73a25e5d1611bb7a9ec845633de93807acaa969d83681512bc32c0da7b28"} Nov 22 08:37:17 crc kubenswrapper[4929]: I1122 08:37:17.747101 4929 scope.go:117] "RemoveContainer" containerID="c64fc5cc22461a9c6f76fde52da8a2afdbfb03ab12f3a7867bade642e9646fa8" Nov 22 08:37:17 crc kubenswrapper[4929]: I1122 08:37:17.773620 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvfwq\" (UniqueName: \"kubernetes.io/projected/aa615d23-6b44-4eec-826c-415504f99413-kube-api-access-rvfwq\") on node \"crc\" DevicePath \"\"" Nov 22 08:37:17 crc kubenswrapper[4929]: I1122 08:37:17.773695 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa615d23-6b44-4eec-826c-415504f99413-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 08:37:17 crc kubenswrapper[4929]: I1122 08:37:17.773712 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa615d23-6b44-4eec-826c-415504f99413-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 08:37:17 crc kubenswrapper[4929]: I1122 08:37:17.780075 4929 scope.go:117] "RemoveContainer" containerID="d72b362f8154d31676179328346a31a4452b0c3c1d75a3c7475d3f7841e4173e" Nov 22 08:37:17 crc kubenswrapper[4929]: I1122 08:37:17.800093 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9jn7k"] Nov 22 08:37:17 crc kubenswrapper[4929]: I1122 08:37:17.814637 4929 scope.go:117] "RemoveContainer" containerID="f5cac1d3f13e6b2eb03f29d08eb52b00a4c6dc988ba13a6cb3aa461d4f59ec1d" Nov 22 08:37:17 crc kubenswrapper[4929]: I1122 08:37:17.818063 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9jn7k"] Nov 22 08:37:17 crc kubenswrapper[4929]: I1122 08:37:17.962352 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa615d23-6b44-4eec-826c-415504f99413" path="/var/lib/kubelet/pods/aa615d23-6b44-4eec-826c-415504f99413/volumes" Nov 22 08:37:18 crc kubenswrapper[4929]: I1122 08:37:18.757936 4929 generic.go:334] "Generic (PLEG): container finished" podID="7407e2e2-93e4-4598-8c40-77086e07bf74" containerID="0973d4dd97e74fe054f01cd1976c254a7b1f6cac26e9e329adfa35cfe44617b1" exitCode=0 Nov 22 08:37:18 crc kubenswrapper[4929]: I1122 08:37:18.758000 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzbmv" event={"ID":"7407e2e2-93e4-4598-8c40-77086e07bf74","Type":"ContainerDied","Data":"0973d4dd97e74fe054f01cd1976c254a7b1f6cac26e9e329adfa35cfe44617b1"} Nov 22 08:37:20 crc kubenswrapper[4929]: I1122 08:37:20.443248 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-r4p65" Nov 22 08:37:20 crc kubenswrapper[4929]: I1122 08:37:20.443324 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-r4p65" Nov 22 08:37:20 crc kubenswrapper[4929]: I1122 08:37:20.546241 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-r4p65" Nov 22 08:37:27 crc kubenswrapper[4929]: I1122 08:37:27.852472 4929 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzbmv" event={"ID":"7407e2e2-93e4-4598-8c40-77086e07bf74","Type":"ContainerStarted","Data":"60fbf159ff9ee005ba52c235357f4e2fe4cdb3516da1d15e5b8d9fa7d676e286"} Nov 22 08:37:27 crc kubenswrapper[4929]: I1122 08:37:27.874936 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fzbmv" podStartSLOduration=2.743280101 podStartE2EDuration="13.874905424s" podCreationTimestamp="2025-11-22 08:37:14 +0000 UTC" firstStartedPulling="2025-11-22 08:37:15.770797261 +0000 UTC m=+5172.880251274" lastFinishedPulling="2025-11-22 08:37:26.902422584 +0000 UTC m=+5184.011876597" observedRunningTime="2025-11-22 08:37:27.869497579 +0000 UTC m=+5184.978951612" watchObservedRunningTime="2025-11-22 08:37:27.874905424 +0000 UTC m=+5184.984359447" Nov 22 08:37:28 crc kubenswrapper[4929]: I1122 08:37:28.705457 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg_a5922246-83ed-429e-82eb-f9fd7810d687/util/0.log" Nov 22 08:37:29 crc kubenswrapper[4929]: I1122 08:37:29.325329 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg_a5922246-83ed-429e-82eb-f9fd7810d687/util/0.log" Nov 22 08:37:29 crc kubenswrapper[4929]: I1122 08:37:29.358809 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg_a5922246-83ed-429e-82eb-f9fd7810d687/pull/0.log" Nov 22 08:37:29 crc kubenswrapper[4929]: I1122 08:37:29.427180 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg_a5922246-83ed-429e-82eb-f9fd7810d687/pull/0.log" Nov 22 08:37:29 crc kubenswrapper[4929]: I1122 08:37:29.590657 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg_a5922246-83ed-429e-82eb-f9fd7810d687/extract/0.log" Nov 22 08:37:29 crc kubenswrapper[4929]: I1122 08:37:29.591193 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg_a5922246-83ed-429e-82eb-f9fd7810d687/pull/0.log" Nov 22 08:37:29 crc kubenswrapper[4929]: I1122 08:37:29.596324 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_95b15301edd46b38fb4f82cc0cccbbe74eb944ce52e5bf4be8f513e191mnrvg_a5922246-83ed-429e-82eb-f9fd7810d687/util/0.log" Nov 22 08:37:29 crc kubenswrapper[4929]: I1122 08:37:29.791292 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6498cbf48f-h7h67_f90b6c4a-a83b-48df-95ab-0240fee75881/kube-rbac-proxy/0.log" Nov 22 08:37:29 crc kubenswrapper[4929]: I1122 08:37:29.831963 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-75fb479bcc-hlxc8_aa0e6902-572d-44e6-9a5b-efbbbf188e01/kube-rbac-proxy/0.log" Nov 22 08:37:30 crc kubenswrapper[4929]: I1122 08:37:30.093119 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-767ccfd65f-zldhw_3db96ba7-8db5-4603-bab2-bafe30ce4874/kube-rbac-proxy/0.log" Nov 22 08:37:30 crc kubenswrapper[4929]: I1122 08:37:30.101034 
4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6498cbf48f-h7h67_f90b6c4a-a83b-48df-95ab-0240fee75881/manager/0.log" Nov 22 08:37:30 crc kubenswrapper[4929]: I1122 08:37:30.109279 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-75fb479bcc-hlxc8_aa0e6902-572d-44e6-9a5b-efbbbf188e01/manager/0.log" Nov 22 08:37:30 crc kubenswrapper[4929]: I1122 08:37:30.238661 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-767ccfd65f-zldhw_3db96ba7-8db5-4603-bab2-bafe30ce4874/manager/0.log" Nov 22 08:37:30 crc kubenswrapper[4929]: I1122 08:37:30.348423 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7969689c84-b2l86_f0d8223c-7551-48b6-8a56-fd43fb266534/kube-rbac-proxy/0.log" Nov 22 08:37:30 crc kubenswrapper[4929]: I1122 08:37:30.437875 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7969689c84-b2l86_f0d8223c-7551-48b6-8a56-fd43fb266534/manager/0.log" Nov 22 08:37:30 crc kubenswrapper[4929]: I1122 08:37:30.503775 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-r4p65" Nov 22 08:37:30 crc kubenswrapper[4929]: I1122 08:37:30.568190 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-r4p65"] Nov 22 08:37:30 crc kubenswrapper[4929]: I1122 08:37:30.583501 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-56f54d6746-wg5hx_1cf9bdb5-5eec-4cf4-a7ef-021a2cc6e59f/kube-rbac-proxy/0.log" Nov 22 08:37:30 crc kubenswrapper[4929]: I1122 08:37:30.613218 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-56f54d6746-wg5hx_1cf9bdb5-5eec-4cf4-a7ef-021a2cc6e59f/manager/0.log" Nov 22 08:37:30 crc kubenswrapper[4929]: I1122 08:37:30.732172 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-598f69df5d-9tqpv_80a127c3-bf01-4a74-8f00-9226fbb485cd/kube-rbac-proxy/0.log" Nov 22 08:37:30 crc kubenswrapper[4929]: I1122 08:37:30.876253 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-r4p65" podUID="36faef4c-842e-42af-8dc6-6999fda45e60" containerName="registry-server" containerID="cri-o://13f534c5fe10e6916abde065be33a7f5c03a7b90a2b99aa66292028bd48bdf3e" gracePeriod=2 Nov 22 08:37:31 crc kubenswrapper[4929]: I1122 08:37:31.388092 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-598f69df5d-9tqpv_80a127c3-bf01-4a74-8f00-9226fbb485cd/manager/0.log" Nov 22 08:37:31 crc kubenswrapper[4929]: I1122 08:37:31.454705 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6dd8864d7c-62kf4_ccb9635f-f142-4835-99be-509914bbeb1c/kube-rbac-proxy/0.log" Nov 22 08:37:31 crc kubenswrapper[4929]: I1122 08:37:31.649155 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-6dd8864d7c-62kf4_ccb9635f-f142-4835-99be-509914bbeb1c/manager/0.log" Nov 22 08:37:31 crc kubenswrapper[4929]: I1122 08:37:31.719290 4929 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-99b499f4-q5bvk_7ec24093-c914-475e-86b9-2c4ba96791ff/kube-rbac-proxy/0.log" Nov 22 08:37:31 crc kubenswrapper[4929]: I1122 08:37:31.753440 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-99b499f4-q5bvk_7ec24093-c914-475e-86b9-2c4ba96791ff/manager/0.log" Nov 22 08:37:31 crc kubenswrapper[4929]: I1122 08:37:31.897623 4929 generic.go:334] "Generic (PLEG): container finished" podID="36faef4c-842e-42af-8dc6-6999fda45e60" containerID="13f534c5fe10e6916abde065be33a7f5c03a7b90a2b99aa66292028bd48bdf3e" exitCode=0 Nov 22 08:37:31 crc kubenswrapper[4929]: I1122 08:37:31.897736 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r4p65" event={"ID":"36faef4c-842e-42af-8dc6-6999fda45e60","Type":"ContainerDied","Data":"13f534c5fe10e6916abde065be33a7f5c03a7b90a2b99aa66292028bd48bdf3e"} Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.013353 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7454b96578-rrdbn_79972b49-f1f3-482e-88ac-63f5e249d537/kube-rbac-proxy/0.log" Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.226987 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r4p65" Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.262467 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7454b96578-rrdbn_79972b49-f1f3-482e-88ac-63f5e249d537/manager/0.log" Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.278951 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58f887965d-9lr2t_3713fa6e-3e99-41b8-bfff-22c91a622841/kube-rbac-proxy/0.log" Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.321956 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36faef4c-842e-42af-8dc6-6999fda45e60-utilities\") pod \"36faef4c-842e-42af-8dc6-6999fda45e60\" (UID: \"36faef4c-842e-42af-8dc6-6999fda45e60\") " Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.322075 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zbkjl\" (UniqueName: \"kubernetes.io/projected/36faef4c-842e-42af-8dc6-6999fda45e60-kube-api-access-zbkjl\") pod \"36faef4c-842e-42af-8dc6-6999fda45e60\" (UID: \"36faef4c-842e-42af-8dc6-6999fda45e60\") " Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.322371 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36faef4c-842e-42af-8dc6-6999fda45e60-catalog-content\") pod \"36faef4c-842e-42af-8dc6-6999fda45e60\" (UID: \"36faef4c-842e-42af-8dc6-6999fda45e60\") " Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.323115 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36faef4c-842e-42af-8dc6-6999fda45e60-utilities" (OuterVolumeSpecName: "utilities") pod "36faef4c-842e-42af-8dc6-6999fda45e60" (UID: "36faef4c-842e-42af-8dc6-6999fda45e60"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.338462 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36faef4c-842e-42af-8dc6-6999fda45e60-kube-api-access-zbkjl" (OuterVolumeSpecName: "kube-api-access-zbkjl") pod "36faef4c-842e-42af-8dc6-6999fda45e60" (UID: "36faef4c-842e-42af-8dc6-6999fda45e60"). InnerVolumeSpecName "kube-api-access-zbkjl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.354971 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36faef4c-842e-42af-8dc6-6999fda45e60-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "36faef4c-842e-42af-8dc6-6999fda45e60" (UID: "36faef4c-842e-42af-8dc6-6999fda45e60"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.355257 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58f887965d-9lr2t_3713fa6e-3e99-41b8-bfff-22c91a622841/manager/0.log" Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.425076 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/36faef4c-842e-42af-8dc6-6999fda45e60-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.425124 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/36faef4c-842e-42af-8dc6-6999fda45e60-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.425138 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zbkjl\" (UniqueName: \"kubernetes.io/projected/36faef4c-842e-42af-8dc6-6999fda45e60-kube-api-access-zbkjl\") on node \"crc\" DevicePath \"\"" Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.469526 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-54b5986bb8-dzhvl_696093b1-6e6c-4f6d-915b-f1b5481834a5/kube-rbac-proxy/0.log" Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.594874 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-78bd47f458-knndh_e8422098-8e21-47a4-9a93-9e3eed95dde7/kube-rbac-proxy/0.log" Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.616008 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-54b5986bb8-dzhvl_696093b1-6e6c-4f6d-915b-f1b5481834a5/manager/0.log" Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.773464 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-78bd47f458-knndh_e8422098-8e21-47a4-9a93-9e3eed95dde7/manager/0.log" Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.805839 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-cfbb9c588-ckg8c_5eb78153-a103-4885-b710-406198b25403/kube-rbac-proxy/0.log" Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.921362 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r4p65" 
event={"ID":"36faef4c-842e-42af-8dc6-6999fda45e60","Type":"ContainerDied","Data":"2c8129e905b84c536ac4e8445578eb700ad03f17218083eed26c7a075d2c290c"} Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.921425 4929 scope.go:117] "RemoveContainer" containerID="13f534c5fe10e6916abde065be33a7f5c03a7b90a2b99aa66292028bd48bdf3e" Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.921589 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r4p65" Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.944560 4929 scope.go:117] "RemoveContainer" containerID="95fa6a10794c5000738d50ae42f496d00f8ce2c4d05d54212ec69fa89b22e418" Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.970143 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-r4p65"] Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.985046 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-r4p65"] Nov 22 08:37:32 crc kubenswrapper[4929]: I1122 08:37:32.991671 4929 scope.go:117] "RemoveContainer" containerID="5c81e917a814060637ef31f1dd68a5a12624d33812e26357d826a2873591f710" Nov 22 08:37:33 crc kubenswrapper[4929]: I1122 08:37:33.013507 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-cfbb9c588-ckg8c_5eb78153-a103-4885-b710-406198b25403/manager/0.log" Nov 22 08:37:33 crc kubenswrapper[4929]: I1122 08:37:33.048575 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-54cfbf4c7d-4ssf8_bf7b0c38-b9e7-4036-a9c0-9494b65c8714/kube-rbac-proxy/0.log" Nov 22 08:37:33 crc kubenswrapper[4929]: I1122 08:37:33.050648 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-54cfbf4c7d-4ssf8_bf7b0c38-b9e7-4036-a9c0-9494b65c8714/manager/0.log" Nov 22 08:37:33 crc kubenswrapper[4929]: E1122 08:37:33.112624 4929 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod36faef4c_842e_42af_8dc6_6999fda45e60.slice/crio-2c8129e905b84c536ac4e8445578eb700ad03f17218083eed26c7a075d2c290c\": RecentStats: unable to find data in memory cache]" Nov 22 08:37:33 crc kubenswrapper[4929]: I1122 08:37:33.117499 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz_d9191fcc-97a0-4e23-bc96-89dfbd474f25/kube-rbac-proxy/0.log" Nov 22 08:37:33 crc kubenswrapper[4929]: I1122 08:37:33.219575 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-8c7444f48-x9fhz_d9191fcc-97a0-4e23-bc96-89dfbd474f25/manager/0.log" Nov 22 08:37:33 crc kubenswrapper[4929]: I1122 08:37:33.275015 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-6576b65d54-nl6qn_0bc28b5a-7c1e-4252-828b-92057d3d6749/kube-rbac-proxy/0.log" Nov 22 08:37:33 crc kubenswrapper[4929]: I1122 08:37:33.426026 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-64f4bfcd74-twkf7_22cf71fc-be54-4a37-8663-2b9419232940/kube-rbac-proxy/0.log" Nov 22 08:37:33 crc kubenswrapper[4929]: I1122 08:37:33.662808 4929 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack-operators_openstack-operator-index-pfl57_f740d689-ada3-413f-a904-e09e08e09a1b/registry-server/0.log" Nov 22 08:37:33 crc kubenswrapper[4929]: I1122 08:37:33.743990 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-64f4bfcd74-twkf7_22cf71fc-be54-4a37-8663-2b9419232940/operator/0.log" Nov 22 08:37:33 crc kubenswrapper[4929]: I1122 08:37:33.822285 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-54fc5f65b7-2lnbz_38070268-a2fc-4876-a8c2-81dcd0bc7f28/kube-rbac-proxy/0.log" Nov 22 08:37:33 crc kubenswrapper[4929]: I1122 08:37:33.958999 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36faef4c-842e-42af-8dc6-6999fda45e60" path="/var/lib/kubelet/pods/36faef4c-842e-42af-8dc6-6999fda45e60/volumes" Nov 22 08:37:34 crc kubenswrapper[4929]: I1122 08:37:34.015672 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b797b8dff-m7z2n_9edcc36b-ecc0-4d9a-b549-e370b4a5e018/kube-rbac-proxy/0.log" Nov 22 08:37:34 crc kubenswrapper[4929]: I1122 08:37:34.044667 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-54fc5f65b7-2lnbz_38070268-a2fc-4876-a8c2-81dcd0bc7f28/manager/0.log" Nov 22 08:37:34 crc kubenswrapper[4929]: I1122 08:37:34.170652 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b797b8dff-m7z2n_9edcc36b-ecc0-4d9a-b549-e370b4a5e018/manager/0.log" Nov 22 08:37:34 crc kubenswrapper[4929]: I1122 08:37:34.362443 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-djv62_4683d9d2-95c8-4fa3-a710-9b03cce32d1f/operator/0.log" Nov 22 08:37:34 crc kubenswrapper[4929]: I1122 08:37:34.478397 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d656998f4-2trxf_edea9302-eeed-48b3-a75f-170198cd1b2f/kube-rbac-proxy/0.log" Nov 22 08:37:34 crc kubenswrapper[4929]: I1122 08:37:34.601833 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fzbmv" Nov 22 08:37:34 crc kubenswrapper[4929]: I1122 08:37:34.601895 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fzbmv" Nov 22 08:37:34 crc kubenswrapper[4929]: I1122 08:37:34.619571 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6d4bf84b58-xh4kd_027a936c-fba7-49af-b9b7-e849b356c87c/kube-rbac-proxy/0.log" Nov 22 08:37:34 crc kubenswrapper[4929]: I1122 08:37:34.670262 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d656998f4-2trxf_edea9302-eeed-48b3-a75f-170198cd1b2f/manager/0.log" Nov 22 08:37:34 crc kubenswrapper[4929]: I1122 08:37:34.682243 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-6576b65d54-nl6qn_0bc28b5a-7c1e-4252-828b-92057d3d6749/manager/0.log" Nov 22 08:37:34 crc kubenswrapper[4929]: I1122 08:37:34.854348 4929 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_test-operator-controller-manager-b4c496f69-g64vx_91edab4b-0002-4a85-9ab9-9475a07e0be4/kube-rbac-proxy/0.log" Nov 22 08:37:34 crc kubenswrapper[4929]: I1122 08:37:34.897030 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-b4c496f69-g64vx_91edab4b-0002-4a85-9ab9-9475a07e0be4/manager/0.log" Nov 22 08:37:35 crc kubenswrapper[4929]: I1122 08:37:35.022310 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6d4bf84b58-xh4kd_027a936c-fba7-49af-b9b7-e849b356c87c/manager/0.log" Nov 22 08:37:35 crc kubenswrapper[4929]: I1122 08:37:35.101969 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-747b777d6b-wwnbh_62c58af8-c121-43e3-b91c-524e2d2783bd/kube-rbac-proxy/0.log" Nov 22 08:37:35 crc kubenswrapper[4929]: I1122 08:37:35.307582 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-747b777d6b-wwnbh_62c58af8-c121-43e3-b91c-524e2d2783bd/manager/0.log" Nov 22 08:37:35 crc kubenswrapper[4929]: I1122 08:37:35.648495 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fzbmv" podUID="7407e2e2-93e4-4598-8c40-77086e07bf74" containerName="registry-server" probeResult="failure" output=< Nov 22 08:37:35 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 08:37:35 crc kubenswrapper[4929]: > Nov 22 08:37:45 crc kubenswrapper[4929]: I1122 08:37:45.648946 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fzbmv" podUID="7407e2e2-93e4-4598-8c40-77086e07bf74" containerName="registry-server" probeResult="failure" output=< Nov 22 08:37:45 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 08:37:45 crc kubenswrapper[4929]: > Nov 22 08:37:52 crc kubenswrapper[4929]: I1122 08:37:52.758815 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-k7jgs_964d3766-8968-4ce0-b68c-bf839937e0d3/control-plane-machine-set-operator/0.log" Nov 22 08:37:52 crc kubenswrapper[4929]: I1122 08:37:52.862853 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-htp4z_c3f44df6-8db0-4f77-ae46-f361e5d8948f/kube-rbac-proxy/0.log" Nov 22 08:37:52 crc kubenswrapper[4929]: I1122 08:37:52.913313 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-htp4z_c3f44df6-8db0-4f77-ae46-f361e5d8948f/machine-api-operator/0.log" Nov 22 08:37:55 crc kubenswrapper[4929]: I1122 08:37:55.660201 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fzbmv" podUID="7407e2e2-93e4-4598-8c40-77086e07bf74" containerName="registry-server" probeResult="failure" output=< Nov 22 08:37:55 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 08:37:55 crc kubenswrapper[4929]: > Nov 22 08:38:05 crc kubenswrapper[4929]: I1122 08:38:05.658709 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fzbmv" podUID="7407e2e2-93e4-4598-8c40-77086e07bf74" containerName="registry-server" probeResult="failure" output=< Nov 22 08:38:05 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" 
within 1s Nov 22 08:38:05 crc kubenswrapper[4929]: > Nov 22 08:38:06 crc kubenswrapper[4929]: I1122 08:38:06.724758 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-4wpmv_b471dac5-a291-4b5f-81fd-e17e4166a0fa/cert-manager-controller/0.log" Nov 22 08:38:06 crc kubenswrapper[4929]: I1122 08:38:06.883802 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-lfmhb_89a720f9-04b9-4275-b820-c65476faa487/cert-manager-cainjector/0.log" Nov 22 08:38:06 crc kubenswrapper[4929]: I1122 08:38:06.991580 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-b8d64_255da6c7-3a6c-4cda-99f3-1d8b3ed48139/cert-manager-webhook/0.log" Nov 22 08:38:16 crc kubenswrapper[4929]: I1122 08:38:15.650940 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fzbmv" podUID="7407e2e2-93e4-4598-8c40-77086e07bf74" containerName="registry-server" probeResult="failure" output=< Nov 22 08:38:16 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 08:38:16 crc kubenswrapper[4929]: > Nov 22 08:38:18 crc kubenswrapper[4929]: I1122 08:38:18.594399 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 08:38:18 crc kubenswrapper[4929]: I1122 08:38:18.595037 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 08:38:18 crc kubenswrapper[4929]: I1122 08:38:18.774688 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-fmlm4_7b597195-80c1-40e6-8617-0bd8ce0d81bd/nmstate-console-plugin/0.log" Nov 22 08:38:18 crc kubenswrapper[4929]: I1122 08:38:18.916289 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-hbj5r_fe99af20-a569-4615-9155-4656a022f118/nmstate-handler/0.log" Nov 22 08:38:18 crc kubenswrapper[4929]: I1122 08:38:18.997155 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-mrx5d_8dfa9ce6-f8c2-4b30-81e9-0bd57c98096b/kube-rbac-proxy/0.log" Nov 22 08:38:19 crc kubenswrapper[4929]: I1122 08:38:19.027310 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-mrx5d_8dfa9ce6-f8c2-4b30-81e9-0bd57c98096b/nmstate-metrics/0.log" Nov 22 08:38:19 crc kubenswrapper[4929]: I1122 08:38:19.216413 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-pl877_8235a65f-ef76-4fee-9bcf-3d0fc36d330a/nmstate-operator/0.log" Nov 22 08:38:19 crc kubenswrapper[4929]: I1122 08:38:19.243395 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-g7tnr_d03ab878-ec20-4ce1-be9d-bad1a6f35b99/nmstate-webhook/0.log" Nov 22 08:38:25 crc kubenswrapper[4929]: I1122 08:38:25.653198 4929 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-fzbmv" 
podUID="7407e2e2-93e4-4598-8c40-77086e07bf74" containerName="registry-server" probeResult="failure" output=< Nov 22 08:38:25 crc kubenswrapper[4929]: timeout: failed to connect service ":50051" within 1s Nov 22 08:38:25 crc kubenswrapper[4929]: > Nov 22 08:38:34 crc kubenswrapper[4929]: I1122 08:38:34.674990 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fzbmv" Nov 22 08:38:34 crc kubenswrapper[4929]: I1122 08:38:34.732760 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fzbmv" Nov 22 08:38:34 crc kubenswrapper[4929]: I1122 08:38:34.919844 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fzbmv"] Nov 22 08:38:35 crc kubenswrapper[4929]: I1122 08:38:35.892578 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-hpt9g_129ee7fb-2597-475d-882e-8064f3e4b4fc/kube-rbac-proxy/0.log" Nov 22 08:38:36 crc kubenswrapper[4929]: I1122 08:38:36.188549 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-4vf85_5419f653-54d8-407c-bf43-0258a3b4451c/cp-frr-files/0.log" Nov 22 08:38:36 crc kubenswrapper[4929]: I1122 08:38:36.346124 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-hpt9g_129ee7fb-2597-475d-882e-8064f3e4b4fc/controller/0.log" Nov 22 08:38:36 crc kubenswrapper[4929]: I1122 08:38:36.399135 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-4vf85_5419f653-54d8-407c-bf43-0258a3b4451c/cp-frr-files/0.log" Nov 22 08:38:36 crc kubenswrapper[4929]: I1122 08:38:36.410358 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-4vf85_5419f653-54d8-407c-bf43-0258a3b4451c/cp-reloader/0.log" Nov 22 08:38:36 crc kubenswrapper[4929]: I1122 08:38:36.410404 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-4vf85_5419f653-54d8-407c-bf43-0258a3b4451c/cp-metrics/0.log" Nov 22 08:38:36 crc kubenswrapper[4929]: I1122 08:38:36.569079 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-4vf85_5419f653-54d8-407c-bf43-0258a3b4451c/cp-reloader/0.log" Nov 22 08:38:36 crc kubenswrapper[4929]: I1122 08:38:36.580240 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fzbmv" podUID="7407e2e2-93e4-4598-8c40-77086e07bf74" containerName="registry-server" containerID="cri-o://60fbf159ff9ee005ba52c235357f4e2fe4cdb3516da1d15e5b8d9fa7d676e286" gracePeriod=2 Nov 22 08:38:36 crc kubenswrapper[4929]: I1122 08:38:36.805979 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-4vf85_5419f653-54d8-407c-bf43-0258a3b4451c/cp-frr-files/0.log" Nov 22 08:38:36 crc kubenswrapper[4929]: I1122 08:38:36.814361 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-4vf85_5419f653-54d8-407c-bf43-0258a3b4451c/cp-reloader/0.log" Nov 22 08:38:36 crc kubenswrapper[4929]: I1122 08:38:36.844643 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-4vf85_5419f653-54d8-407c-bf43-0258a3b4451c/cp-metrics/0.log" Nov 22 08:38:36 crc kubenswrapper[4929]: I1122 08:38:36.921426 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-4vf85_5419f653-54d8-407c-bf43-0258a3b4451c/cp-metrics/0.log" Nov 22 08:38:37 crc 
kubenswrapper[4929]: I1122 08:38:37.053386 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-4vf85_5419f653-54d8-407c-bf43-0258a3b4451c/cp-reloader/0.log" Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.062768 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-4vf85_5419f653-54d8-407c-bf43-0258a3b4451c/cp-metrics/0.log" Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.098787 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-4vf85_5419f653-54d8-407c-bf43-0258a3b4451c/cp-frr-files/0.log" Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.134653 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-4vf85_5419f653-54d8-407c-bf43-0258a3b4451c/controller/0.log" Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.292310 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-4vf85_5419f653-54d8-407c-bf43-0258a3b4451c/frr-metrics/0.log" Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.379967 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-4vf85_5419f653-54d8-407c-bf43-0258a3b4451c/kube-rbac-proxy/0.log" Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.453540 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-4vf85_5419f653-54d8-407c-bf43-0258a3b4451c/kube-rbac-proxy-frr/0.log" Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.604970 4929 generic.go:334] "Generic (PLEG): container finished" podID="7407e2e2-93e4-4598-8c40-77086e07bf74" containerID="60fbf159ff9ee005ba52c235357f4e2fe4cdb3516da1d15e5b8d9fa7d676e286" exitCode=0 Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.605017 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzbmv" event={"ID":"7407e2e2-93e4-4598-8c40-77086e07bf74","Type":"ContainerDied","Data":"60fbf159ff9ee005ba52c235357f4e2fe4cdb3516da1d15e5b8d9fa7d676e286"} Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.608477 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-4vf85_5419f653-54d8-407c-bf43-0258a3b4451c/reloader/0.log" Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.745527 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-ph794_636d3dfa-e47c-487c-a23f-4c4b8f65a69c/frr-k8s-webhook-server/0.log" Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.752519 4929 util.go:48] "No ready sandbox for pod can be found. 
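The "Killing container with a grace period ... gracePeriod=2" entry above is the standard two-phase stop: SIGTERM first, then SIGKILL if the process has not exited when the grace period runs out (here the container exited cleanly, hence the later exitCode=0). A process-level Go sketch of that contract on Linux; the real signalling is done by CRI-O on the kubelet's behalf, so this is illustrative only:

package main

import (
	"os"
	"os/exec"
	"syscall"
	"time"
)

// killWithGrace mirrors the gracePeriod semantics logged above:
// polite SIGTERM, bounded wait, forced SIGKILL as the fallback.
func killWithGrace(cmd *exec.Cmd, grace time.Duration) error {
	_ = cmd.Process.Signal(syscall.SIGTERM)
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case err := <-done:
		return err // exited within the grace period
	case <-time.After(grace):
		_ = cmd.Process.Kill() // grace expired: SIGKILL
		return <-done
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		os.Exit(1)
	}
	_ = killWithGrace(cmd, 2*time.Second) // gracePeriod=2, as in the log
}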
Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.752519 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fzbmv"
Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.834831 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7407e2e2-93e4-4598-8c40-77086e07bf74-catalog-content\") pod \"7407e2e2-93e4-4598-8c40-77086e07bf74\" (UID: \"7407e2e2-93e4-4598-8c40-77086e07bf74\") "
Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.834943 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k2pjx\" (UniqueName: \"kubernetes.io/projected/7407e2e2-93e4-4598-8c40-77086e07bf74-kube-api-access-k2pjx\") pod \"7407e2e2-93e4-4598-8c40-77086e07bf74\" (UID: \"7407e2e2-93e4-4598-8c40-77086e07bf74\") "
Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.835258 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7407e2e2-93e4-4598-8c40-77086e07bf74-utilities\") pod \"7407e2e2-93e4-4598-8c40-77086e07bf74\" (UID: \"7407e2e2-93e4-4598-8c40-77086e07bf74\") "
Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.836050 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7407e2e2-93e4-4598-8c40-77086e07bf74-utilities" (OuterVolumeSpecName: "utilities") pod "7407e2e2-93e4-4598-8c40-77086e07bf74" (UID: "7407e2e2-93e4-4598-8c40-77086e07bf74"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.842063 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7407e2e2-93e4-4598-8c40-77086e07bf74-kube-api-access-k2pjx" (OuterVolumeSpecName: "kube-api-access-k2pjx") pod "7407e2e2-93e4-4598-8c40-77086e07bf74" (UID: "7407e2e2-93e4-4598-8c40-77086e07bf74"). InnerVolumeSpecName "kube-api-access-k2pjx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.938002 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7407e2e2-93e4-4598-8c40-77086e07bf74-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.938062 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k2pjx\" (UniqueName: \"kubernetes.io/projected/7407e2e2-93e4-4598-8c40-77086e07bf74-kube-api-access-k2pjx\") on node \"crc\" DevicePath \"\""
Nov 22 08:38:37 crc kubenswrapper[4929]: I1122 08:38:37.953432 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7407e2e2-93e4-4598-8c40-77086e07bf74-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7407e2e2-93e4-4598-8c40-77086e07bf74" (UID: "7407e2e2-93e4-4598-8c40-77086e07bf74"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 08:38:38 crc kubenswrapper[4929]: I1122 08:38:38.038239 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-7bc5fd5f85-qf654_8ce2ff51-c2c3-4e0d-b6fc-73259af320ab/manager/0.log"
Nov 22 08:38:38 crc kubenswrapper[4929]: I1122 08:38:38.040655 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7407e2e2-93e4-4598-8c40-77086e07bf74-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 08:38:38 crc kubenswrapper[4929]: I1122 08:38:38.207597 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-9d7d9d8fc-9m2z7_8a3d08f2-6eea-44d4-9f2a-dd1bc67f4197/webhook-server/0.log"
Nov 22 08:38:38 crc kubenswrapper[4929]: I1122 08:38:38.254903 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-f8zsn_e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf/kube-rbac-proxy/0.log"
Nov 22 08:38:38 crc kubenswrapper[4929]: I1122 08:38:38.626950 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fzbmv" event={"ID":"7407e2e2-93e4-4598-8c40-77086e07bf74","Type":"ContainerDied","Data":"c738f2788bbe0698a2276628f98f23a3c19e1b59d09fb00eefe703a5f891dc66"}
Nov 22 08:38:38 crc kubenswrapper[4929]: I1122 08:38:38.627468 4929 scope.go:117] "RemoveContainer" containerID="60fbf159ff9ee005ba52c235357f4e2fe4cdb3516da1d15e5b8d9fa7d676e286"
Nov 22 08:38:38 crc kubenswrapper[4929]: I1122 08:38:38.627056 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fzbmv"
Nov 22 08:38:38 crc kubenswrapper[4929]: I1122 08:38:38.657895 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fzbmv"]
Nov 22 08:38:38 crc kubenswrapper[4929]: I1122 08:38:38.659694 4929 scope.go:117] "RemoveContainer" containerID="0973d4dd97e74fe054f01cd1976c254a7b1f6cac26e9e329adfa35cfe44617b1"
Nov 22 08:38:38 crc kubenswrapper[4929]: I1122 08:38:38.669165 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fzbmv"]
Nov 22 08:38:38 crc kubenswrapper[4929]: I1122 08:38:38.699272 4929 scope.go:117] "RemoveContainer" containerID="c1de85bd6398974d55ce468ce1f2e3c1d95c54d88d2952d4d849758db3cb9923"
Nov 22 08:38:38 crc kubenswrapper[4929]: I1122 08:38:38.846929 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-4vf85_5419f653-54d8-407c-bf43-0258a3b4451c/frr/0.log"
Nov 22 08:38:39 crc kubenswrapper[4929]: I1122 08:38:39.284847 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-f8zsn_e2d7c1fd-e426-4a70-9c6f-3aff0bb0e1bf/speaker/0.log"
Nov 22 08:38:39 crc kubenswrapper[4929]: I1122 08:38:39.961780 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7407e2e2-93e4-4598-8c40-77086e07bf74" path="/var/lib/kubelet/pods/7407e2e2-93e4-4598-8c40-77086e07bf74/volumes"
Nov 22 08:38:48 crc kubenswrapper[4929]: I1122 08:38:48.594636 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 08:38:54 crc kubenswrapper[4929]: I1122 08:38:54.734349 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d_03b26279-625b-4afd-be4e-8bd77491463b/util/0.log" Nov 22 08:38:54 crc kubenswrapper[4929]: I1122 08:38:54.989325 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d_03b26279-625b-4afd-be4e-8bd77491463b/pull/0.log" Nov 22 08:38:55 crc kubenswrapper[4929]: I1122 08:38:55.011789 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d_03b26279-625b-4afd-be4e-8bd77491463b/util/0.log" Nov 22 08:38:55 crc kubenswrapper[4929]: I1122 08:38:55.021794 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d_03b26279-625b-4afd-be4e-8bd77491463b/pull/0.log" Nov 22 08:38:55 crc kubenswrapper[4929]: I1122 08:38:55.282669 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d_03b26279-625b-4afd-be4e-8bd77491463b/util/0.log" Nov 22 08:38:55 crc kubenswrapper[4929]: I1122 08:38:55.290074 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d_03b26279-625b-4afd-be4e-8bd77491463b/pull/0.log" Nov 22 08:38:55 crc kubenswrapper[4929]: I1122 08:38:55.319061 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772e9gt4d_03b26279-625b-4afd-be4e-8bd77491463b/extract/0.log" Nov 22 08:38:55 crc kubenswrapper[4929]: I1122 08:38:55.521499 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7_de0e8764-3b4b-4ce2-83a4-27ae50c897e8/util/0.log" Nov 22 08:38:55 crc kubenswrapper[4929]: I1122 08:38:55.737438 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7_de0e8764-3b4b-4ce2-83a4-27ae50c897e8/pull/0.log" Nov 22 08:38:55 crc kubenswrapper[4929]: I1122 08:38:55.787531 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7_de0e8764-3b4b-4ce2-83a4-27ae50c897e8/util/0.log" Nov 22 08:38:55 crc kubenswrapper[4929]: I1122 08:38:55.812397 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7_de0e8764-3b4b-4ce2-83a4-27ae50c897e8/pull/0.log" Nov 22 08:38:55 crc kubenswrapper[4929]: I1122 08:38:55.994804 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7_de0e8764-3b4b-4ce2-83a4-27ae50c897e8/pull/0.log" Nov 22 08:38:56 crc kubenswrapper[4929]: I1122 08:38:56.033593 4929 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7_de0e8764-3b4b-4ce2-83a4-27ae50c897e8/util/0.log" Nov 22 08:38:56 crc kubenswrapper[4929]: I1122 08:38:56.044884 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c92106d5t7_de0e8764-3b4b-4ce2-83a4-27ae50c897e8/extract/0.log" Nov 22 08:38:56 crc kubenswrapper[4929]: I1122 08:38:56.211374 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-b658w_12d2673e-b955-421a-839a-56222dc85e7b/extract-utilities/0.log" Nov 22 08:38:56 crc kubenswrapper[4929]: I1122 08:38:56.445245 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-b658w_12d2673e-b955-421a-839a-56222dc85e7b/extract-utilities/0.log" Nov 22 08:38:56 crc kubenswrapper[4929]: I1122 08:38:56.462532 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-b658w_12d2673e-b955-421a-839a-56222dc85e7b/extract-content/0.log" Nov 22 08:38:56 crc kubenswrapper[4929]: I1122 08:38:56.491330 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-b658w_12d2673e-b955-421a-839a-56222dc85e7b/extract-content/0.log" Nov 22 08:38:56 crc kubenswrapper[4929]: I1122 08:38:56.698328 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-b658w_12d2673e-b955-421a-839a-56222dc85e7b/extract-content/0.log" Nov 22 08:38:56 crc kubenswrapper[4929]: I1122 08:38:56.714846 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-b658w_12d2673e-b955-421a-839a-56222dc85e7b/extract-utilities/0.log" Nov 22 08:38:56 crc kubenswrapper[4929]: I1122 08:38:56.974822 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bhx9f_1515626b-94a1-4527-8129-14fe7afaf988/extract-utilities/0.log" Nov 22 08:38:57 crc kubenswrapper[4929]: I1122 08:38:57.327995 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bhx9f_1515626b-94a1-4527-8129-14fe7afaf988/extract-utilities/0.log" Nov 22 08:38:57 crc kubenswrapper[4929]: I1122 08:38:57.376401 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bhx9f_1515626b-94a1-4527-8129-14fe7afaf988/extract-content/0.log" Nov 22 08:38:57 crc kubenswrapper[4929]: I1122 08:38:57.376694 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bhx9f_1515626b-94a1-4527-8129-14fe7afaf988/extract-content/0.log" Nov 22 08:38:57 crc kubenswrapper[4929]: I1122 08:38:57.561411 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bhx9f_1515626b-94a1-4527-8129-14fe7afaf988/extract-content/0.log" Nov 22 08:38:57 crc kubenswrapper[4929]: I1122 08:38:57.611547 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-b658w_12d2673e-b955-421a-839a-56222dc85e7b/registry-server/0.log" Nov 22 08:38:57 crc kubenswrapper[4929]: I1122 08:38:57.670194 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bhx9f_1515626b-94a1-4527-8129-14fe7afaf988/extract-utilities/0.log" Nov 22 08:38:57 crc kubenswrapper[4929]: I1122 08:38:57.915445 4929 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r_17709bed-38bb-4897-8dcd-86c17b6763ec/util/0.log" Nov 22 08:38:58 crc kubenswrapper[4929]: I1122 08:38:58.311441 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bhx9f_1515626b-94a1-4527-8129-14fe7afaf988/registry-server/0.log" Nov 22 08:38:58 crc kubenswrapper[4929]: I1122 08:38:58.340606 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r_17709bed-38bb-4897-8dcd-86c17b6763ec/util/0.log" Nov 22 08:38:58 crc kubenswrapper[4929]: I1122 08:38:58.448121 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r_17709bed-38bb-4897-8dcd-86c17b6763ec/pull/0.log" Nov 22 08:38:58 crc kubenswrapper[4929]: I1122 08:38:58.481602 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r_17709bed-38bb-4897-8dcd-86c17b6763ec/pull/0.log" Nov 22 08:38:58 crc kubenswrapper[4929]: I1122 08:38:58.684351 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r_17709bed-38bb-4897-8dcd-86c17b6763ec/pull/0.log" Nov 22 08:38:58 crc kubenswrapper[4929]: I1122 08:38:58.684698 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r_17709bed-38bb-4897-8dcd-86c17b6763ec/extract/0.log" Nov 22 08:38:58 crc kubenswrapper[4929]: I1122 08:38:58.733612 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6rsz9r_17709bed-38bb-4897-8dcd-86c17b6763ec/util/0.log" Nov 22 08:38:58 crc kubenswrapper[4929]: I1122 08:38:58.765775 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-ttdfk_bce25129-0d0f-4786-96a1-5c7b902e6d71/marketplace-operator/0.log" Nov 22 08:38:58 crc kubenswrapper[4929]: I1122 08:38:58.920334 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jpvhx_f15451b8-3b4b-4f3b-b85c-0876b352e959/extract-utilities/0.log" Nov 22 08:38:59 crc kubenswrapper[4929]: I1122 08:38:59.120555 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jpvhx_f15451b8-3b4b-4f3b-b85c-0876b352e959/extract-utilities/0.log" Nov 22 08:38:59 crc kubenswrapper[4929]: I1122 08:38:59.145611 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jpvhx_f15451b8-3b4b-4f3b-b85c-0876b352e959/extract-content/0.log" Nov 22 08:38:59 crc kubenswrapper[4929]: I1122 08:38:59.181639 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jpvhx_f15451b8-3b4b-4f3b-b85c-0876b352e959/extract-content/0.log" Nov 22 08:38:59 crc kubenswrapper[4929]: I1122 08:38:59.381849 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jpvhx_f15451b8-3b4b-4f3b-b85c-0876b352e959/extract-utilities/0.log" Nov 22 08:38:59 crc kubenswrapper[4929]: I1122 08:38:59.468009 4929 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-operators-cwsj2_ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2/extract-utilities/0.log" Nov 22 08:38:59 crc kubenswrapper[4929]: I1122 08:38:59.471449 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jpvhx_f15451b8-3b4b-4f3b-b85c-0876b352e959/extract-content/0.log" Nov 22 08:38:59 crc kubenswrapper[4929]: I1122 08:38:59.629421 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-jpvhx_f15451b8-3b4b-4f3b-b85c-0876b352e959/registry-server/0.log" Nov 22 08:38:59 crc kubenswrapper[4929]: I1122 08:38:59.712469 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-cwsj2_ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2/extract-content/0.log" Nov 22 08:38:59 crc kubenswrapper[4929]: I1122 08:38:59.737373 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-cwsj2_ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2/extract-content/0.log" Nov 22 08:38:59 crc kubenswrapper[4929]: I1122 08:38:59.783452 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-cwsj2_ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2/extract-utilities/0.log" Nov 22 08:38:59 crc kubenswrapper[4929]: I1122 08:38:59.936489 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-cwsj2_ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2/extract-utilities/0.log" Nov 22 08:38:59 crc kubenswrapper[4929]: I1122 08:38:59.938304 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-cwsj2_ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2/extract-content/0.log" Nov 22 08:39:00 crc kubenswrapper[4929]: I1122 08:39:00.660898 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-cwsj2_ccde4b2b-da1c-4112-99de-c3ee5cb2d4e2/registry-server/0.log" Nov 22 08:39:13 crc kubenswrapper[4929]: I1122 08:39:13.766593 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-88c4k_b8383ad3-1d58-4c89-ab4b-874351c249f3/prometheus-operator/0.log" Nov 22 08:39:13 crc kubenswrapper[4929]: I1122 08:39:13.971735 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-864d7797b9-dkh4p_ddfa0602-ba6c-4d22-9842-a824a8b4a5b4/prometheus-operator-admission-webhook/0.log" Nov 22 08:39:14 crc kubenswrapper[4929]: I1122 08:39:14.049851 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-864d7797b9-htblg_180a8947-9c9f-4870-bfec-07b7cb8c378a/prometheus-operator-admission-webhook/0.log" Nov 22 08:39:14 crc kubenswrapper[4929]: I1122 08:39:14.247394 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-h45rh_117a2144-28d4-4377-973e-3eea96a6a609/operator/0.log" Nov 22 08:39:14 crc kubenswrapper[4929]: I1122 08:39:14.341522 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-t5xzk_1e90cb83-8f01-48a0-9d5e-9dbdacb9c9b8/perses-operator/0.log" Nov 22 08:39:18 crc kubenswrapper[4929]: I1122 08:39:18.594855 4929 patch_prober.go:28] interesting pod/machine-config-daemon-dssfx container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 08:39:18 crc kubenswrapper[4929]: I1122 08:39:18.595667 4929 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 08:39:18 crc kubenswrapper[4929]: I1122 08:39:18.595722 4929 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" Nov 22 08:39:18 crc kubenswrapper[4929]: I1122 08:39:18.603948 4929 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95"} pod="openshift-machine-config-operator/machine-config-daemon-dssfx" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 08:39:18 crc kubenswrapper[4929]: I1122 08:39:18.604025 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" containerName="machine-config-daemon" containerID="cri-o://f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95" gracePeriod=600 Nov 22 08:39:18 crc kubenswrapper[4929]: E1122 08:39:18.800576 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:39:19 crc kubenswrapper[4929]: I1122 08:39:19.022904 4929 generic.go:334] "Generic (PLEG): container finished" podID="470531cb-120c-48d9-80e1-adf074cf3055" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95" exitCode=0 Nov 22 08:39:19 crc kubenswrapper[4929]: I1122 08:39:19.022973 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerDied","Data":"f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95"} Nov 22 08:39:19 crc kubenswrapper[4929]: I1122 08:39:19.023294 4929 scope.go:117] "RemoveContainer" containerID="4ba78b64f79a8e6b71d65feaee6655b928c77e644338723a94ed54163b488f75" Nov 22 08:39:19 crc kubenswrapper[4929]: I1122 08:39:19.024227 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95" Nov 22 08:39:19 crc kubenswrapper[4929]: E1122 08:39:19.024498 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:39:31 crc kubenswrapper[4929]: I1122 08:39:31.948613 4929 scope.go:117] 
"RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95" Nov 22 08:39:31 crc kubenswrapper[4929]: E1122 08:39:31.949534 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:39:42 crc kubenswrapper[4929]: E1122 08:39:42.163903 4929 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.27:46090->38.102.83.27:38781: read tcp 38.102.83.27:46090->38.102.83.27:38781: read: connection reset by peer Nov 22 08:39:46 crc kubenswrapper[4929]: I1122 08:39:46.950625 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95" Nov 22 08:39:46 crc kubenswrapper[4929]: E1122 08:39:46.951774 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:39:58 crc kubenswrapper[4929]: I1122 08:39:58.947312 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95" Nov 22 08:39:58 crc kubenswrapper[4929]: E1122 08:39:58.948106 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:40:13 crc kubenswrapper[4929]: I1122 08:40:13.954887 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95" Nov 22 08:40:13 crc kubenswrapper[4929]: E1122 08:40:13.956344 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:40:24 crc kubenswrapper[4929]: I1122 08:40:24.948377 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95" Nov 22 08:40:24 crc kubenswrapper[4929]: E1122 08:40:24.949244 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 
Nov 22 08:40:36 crc kubenswrapper[4929]: I1122 08:40:36.948270 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95"
Nov 22 08:40:36 crc kubenswrapper[4929]: E1122 08:40:36.949158 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 08:40:51 crc kubenswrapper[4929]: I1122 08:40:51.953858 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95"
Nov 22 08:40:51 crc kubenswrapper[4929]: E1122 08:40:51.958222 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 08:40:56 crc kubenswrapper[4929]: I1122 08:40:56.995236 4929 generic.go:334] "Generic (PLEG): container finished" podID="86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d" containerID="f020917a053e1bd139db5d40e2265ae2c4254e6fa91cae222c06fa229beb47d0" exitCode=0
Nov 22 08:40:56 crc kubenswrapper[4929]: I1122 08:40:56.995340 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-nmgqd/must-gather-5gcnm" event={"ID":"86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d","Type":"ContainerDied","Data":"f020917a053e1bd139db5d40e2265ae2c4254e6fa91cae222c06fa229beb47d0"}
Nov 22 08:40:56 crc kubenswrapper[4929]: I1122 08:40:56.996464 4929 scope.go:117] "RemoveContainer" containerID="f020917a053e1bd139db5d40e2265ae2c4254e6fa91cae222c06fa229beb47d0"
Nov 22 08:40:57 crc kubenswrapper[4929]: I1122 08:40:57.125246 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-nmgqd_must-gather-5gcnm_86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d/gather/0.log"
Nov 22 08:41:04 crc kubenswrapper[4929]: I1122 08:41:04.508122 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-nmgqd/must-gather-5gcnm"]
Nov 22 08:41:04 crc kubenswrapper[4929]: I1122 08:41:04.509619 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-nmgqd/must-gather-5gcnm" podUID="86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d" containerName="copy" containerID="cri-o://9f2494d28834c7bf9b457de3678497fc701a710d0120c161e62a48dc2605ab2f" gracePeriod=2
Nov 22 08:41:04 crc kubenswrapper[4929]: I1122 08:41:04.522769 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-nmgqd/must-gather-5gcnm"]
Nov 22 08:41:05 crc kubenswrapper[4929]: I1122 08:41:05.048880 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-nmgqd_must-gather-5gcnm_86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d/copy/0.log"
Nov 22 08:41:05 crc kubenswrapper[4929]: I1122 08:41:05.050388 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-nmgqd/must-gather-5gcnm"
Nov 22 08:41:05 crc kubenswrapper[4929]: I1122 08:41:05.079314 4929 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-nmgqd_must-gather-5gcnm_86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d/copy/0.log"
Nov 22 08:41:05 crc kubenswrapper[4929]: I1122 08:41:05.080031 4929 generic.go:334] "Generic (PLEG): container finished" podID="86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d" containerID="9f2494d28834c7bf9b457de3678497fc701a710d0120c161e62a48dc2605ab2f" exitCode=143
Nov 22 08:41:05 crc kubenswrapper[4929]: I1122 08:41:05.080114 4929 scope.go:117] "RemoveContainer" containerID="9f2494d28834c7bf9b457de3678497fc701a710d0120c161e62a48dc2605ab2f"
Nov 22 08:41:05 crc kubenswrapper[4929]: I1122 08:41:05.080340 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-nmgqd/must-gather-5gcnm"
Nov 22 08:41:05 crc kubenswrapper[4929]: I1122 08:41:05.118038 4929 scope.go:117] "RemoveContainer" containerID="f020917a053e1bd139db5d40e2265ae2c4254e6fa91cae222c06fa229beb47d0"
Nov 22 08:41:05 crc kubenswrapper[4929]: I1122 08:41:05.178667 4929 scope.go:117] "RemoveContainer" containerID="9f2494d28834c7bf9b457de3678497fc701a710d0120c161e62a48dc2605ab2f"
Nov 22 08:41:05 crc kubenswrapper[4929]: E1122 08:41:05.179060 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f2494d28834c7bf9b457de3678497fc701a710d0120c161e62a48dc2605ab2f\": container with ID starting with 9f2494d28834c7bf9b457de3678497fc701a710d0120c161e62a48dc2605ab2f not found: ID does not exist" containerID="9f2494d28834c7bf9b457de3678497fc701a710d0120c161e62a48dc2605ab2f"
Nov 22 08:41:05 crc kubenswrapper[4929]: I1122 08:41:05.179117 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f2494d28834c7bf9b457de3678497fc701a710d0120c161e62a48dc2605ab2f"} err="failed to get container status \"9f2494d28834c7bf9b457de3678497fc701a710d0120c161e62a48dc2605ab2f\": rpc error: code = NotFound desc = could not find container \"9f2494d28834c7bf9b457de3678497fc701a710d0120c161e62a48dc2605ab2f\": container with ID starting with 9f2494d28834c7bf9b457de3678497fc701a710d0120c161e62a48dc2605ab2f not found: ID does not exist"
Nov 22 08:41:05 crc kubenswrapper[4929]: I1122 08:41:05.179147 4929 scope.go:117] "RemoveContainer" containerID="f020917a053e1bd139db5d40e2265ae2c4254e6fa91cae222c06fa229beb47d0"
Nov 22 08:41:05 crc kubenswrapper[4929]: E1122 08:41:05.179429 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f020917a053e1bd139db5d40e2265ae2c4254e6fa91cae222c06fa229beb47d0\": container with ID starting with f020917a053e1bd139db5d40e2265ae2c4254e6fa91cae222c06fa229beb47d0 not found: ID does not exist" containerID="f020917a053e1bd139db5d40e2265ae2c4254e6fa91cae222c06fa229beb47d0"
Nov 22 08:41:05 crc kubenswrapper[4929]: I1122 08:41:05.179460 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f020917a053e1bd139db5d40e2265ae2c4254e6fa91cae222c06fa229beb47d0"} err="failed to get container status \"f020917a053e1bd139db5d40e2265ae2c4254e6fa91cae222c06fa229beb47d0\": rpc error: code = NotFound desc = could not find container \"f020917a053e1bd139db5d40e2265ae2c4254e6fa91cae222c06fa229beb47d0\": container with ID starting with f020917a053e1bd139db5d40e2265ae2c4254e6fa91cae222c06fa229beb47d0 not found: ID does not exist"
Nov 22 08:41:05 crc kubenswrapper[4929]: I1122 08:41:05.221850 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7vpw\" (UniqueName: \"kubernetes.io/projected/86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d-kube-api-access-w7vpw\") pod \"86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d\" (UID: \"86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d\") "
Nov 22 08:41:05 crc kubenswrapper[4929]: I1122 08:41:05.221944 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d-must-gather-output\") pod \"86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d\" (UID: \"86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d\") "
Nov 22 08:41:05 crc kubenswrapper[4929]: I1122 08:41:05.257433 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d-kube-api-access-w7vpw" (OuterVolumeSpecName: "kube-api-access-w7vpw") pod "86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d" (UID: "86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d"). InnerVolumeSpecName "kube-api-access-w7vpw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 08:41:05 crc kubenswrapper[4929]: I1122 08:41:05.324890 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7vpw\" (UniqueName: \"kubernetes.io/projected/86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d-kube-api-access-w7vpw\") on node \"crc\" DevicePath \"\""
Nov 22 08:41:05 crc kubenswrapper[4929]: I1122 08:41:05.402628 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d" (UID: "86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 08:41:05 crc kubenswrapper[4929]: I1122 08:41:05.427489 4929 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d-must-gather-output\") on node \"crc\" DevicePath \"\""
Nov 22 08:41:05 crc kubenswrapper[4929]: I1122 08:41:05.960167 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d" path="/var/lib/kubelet/pods/86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d/volumes"
Nov 22 08:41:06 crc kubenswrapper[4929]: I1122 08:41:06.949711 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95"
Nov 22 08:41:06 crc kubenswrapper[4929]: E1122 08:41:06.949943 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 08:41:21 crc kubenswrapper[4929]: I1122 08:41:21.947671 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95"
Nov 22 08:41:21 crc kubenswrapper[4929]: E1122 08:41:21.948683 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 08:41:33 crc kubenswrapper[4929]: I1122 08:41:33.954049 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95"
Nov 22 08:41:33 crc kubenswrapper[4929]: E1122 08:41:33.954942 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 08:41:45 crc kubenswrapper[4929]: I1122 08:41:45.947541 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95"
Nov 22 08:41:45 crc kubenswrapper[4929]: E1122 08:41:45.948293 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 08:41:57 crc kubenswrapper[4929]: I1122 08:41:57.948515 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95"
pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:42:11 crc kubenswrapper[4929]: I1122 08:42:11.947301 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95" Nov 22 08:42:11 crc kubenswrapper[4929]: E1122 08:42:11.948283 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:42:15 crc kubenswrapper[4929]: I1122 08:42:15.122204 4929 scope.go:117] "RemoveContainer" containerID="1b7a4e67fd07168f7162b22d96969026d61d93855a9493f23065fb7e5573b00e" Nov 22 08:42:25 crc kubenswrapper[4929]: I1122 08:42:25.947943 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95" Nov 22 08:42:25 crc kubenswrapper[4929]: E1122 08:42:25.948978 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:42:37 crc kubenswrapper[4929]: I1122 08:42:37.947818 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95" Nov 22 08:42:37 crc kubenswrapper[4929]: E1122 08:42:37.949519 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:42:52 crc kubenswrapper[4929]: I1122 08:42:52.947257 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95" Nov 22 08:42:52 crc kubenswrapper[4929]: E1122 08:42:52.949151 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:43:06 crc kubenswrapper[4929]: I1122 08:43:06.946979 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95" Nov 22 08:43:06 crc kubenswrapper[4929]: E1122 08:43:06.947795 4929 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:43:18 crc kubenswrapper[4929]: I1122 08:43:18.947822 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95" Nov 22 08:43:18 crc kubenswrapper[4929]: E1122 08:43:18.948709 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.618626 4929 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-dfwdt"] Nov 22 08:43:27 crc kubenswrapper[4929]: E1122 08:43:27.619865 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36faef4c-842e-42af-8dc6-6999fda45e60" containerName="extract-content" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.619881 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="36faef4c-842e-42af-8dc6-6999fda45e60" containerName="extract-content" Nov 22 08:43:27 crc kubenswrapper[4929]: E1122 08:43:27.619901 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36faef4c-842e-42af-8dc6-6999fda45e60" containerName="registry-server" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.619909 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="36faef4c-842e-42af-8dc6-6999fda45e60" containerName="registry-server" Nov 22 08:43:27 crc kubenswrapper[4929]: E1122 08:43:27.619927 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa615d23-6b44-4eec-826c-415504f99413" containerName="extract-utilities" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.619934 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa615d23-6b44-4eec-826c-415504f99413" containerName="extract-utilities" Nov 22 08:43:27 crc kubenswrapper[4929]: E1122 08:43:27.619957 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7407e2e2-93e4-4598-8c40-77086e07bf74" containerName="extract-utilities" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.619965 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="7407e2e2-93e4-4598-8c40-77086e07bf74" containerName="extract-utilities" Nov 22 08:43:27 crc kubenswrapper[4929]: E1122 08:43:27.619980 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36faef4c-842e-42af-8dc6-6999fda45e60" containerName="extract-utilities" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.619987 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="36faef4c-842e-42af-8dc6-6999fda45e60" containerName="extract-utilities" Nov 22 08:43:27 crc kubenswrapper[4929]: E1122 08:43:27.620000 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa615d23-6b44-4eec-826c-415504f99413" containerName="registry-server" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.620009 4929 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="aa615d23-6b44-4eec-826c-415504f99413" containerName="registry-server" Nov 22 08:43:27 crc kubenswrapper[4929]: E1122 08:43:27.620026 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa615d23-6b44-4eec-826c-415504f99413" containerName="extract-content" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.620033 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa615d23-6b44-4eec-826c-415504f99413" containerName="extract-content" Nov 22 08:43:27 crc kubenswrapper[4929]: E1122 08:43:27.620050 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7407e2e2-93e4-4598-8c40-77086e07bf74" containerName="registry-server" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.620057 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="7407e2e2-93e4-4598-8c40-77086e07bf74" containerName="registry-server" Nov 22 08:43:27 crc kubenswrapper[4929]: E1122 08:43:27.620066 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7407e2e2-93e4-4598-8c40-77086e07bf74" containerName="extract-content" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.620073 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="7407e2e2-93e4-4598-8c40-77086e07bf74" containerName="extract-content" Nov 22 08:43:27 crc kubenswrapper[4929]: E1122 08:43:27.620088 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d" containerName="gather" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.620095 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d" containerName="gather" Nov 22 08:43:27 crc kubenswrapper[4929]: E1122 08:43:27.620118 4929 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d" containerName="copy" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.620145 4929 state_mem.go:107] "Deleted CPUSet assignment" podUID="86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d" containerName="copy" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.621643 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d" containerName="copy" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.621673 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="86bfc9f2-ef0b-4deb-b7e4-513fdff70e0d" containerName="gather" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.621693 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa615d23-6b44-4eec-826c-415504f99413" containerName="registry-server" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.621706 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="7407e2e2-93e4-4598-8c40-77086e07bf74" containerName="registry-server" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.621723 4929 memory_manager.go:354] "RemoveStaleState removing state" podUID="36faef4c-842e-42af-8dc6-6999fda45e60" containerName="registry-server" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.623635 4929 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dfwdt" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.631394 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dfwdt"] Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.736346 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e28b4df-dac4-4701-a4c0-d714baadcdcf-catalog-content\") pod \"community-operators-dfwdt\" (UID: \"8e28b4df-dac4-4701-a4c0-d714baadcdcf\") " pod="openshift-marketplace/community-operators-dfwdt" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.736481 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bc5nj\" (UniqueName: \"kubernetes.io/projected/8e28b4df-dac4-4701-a4c0-d714baadcdcf-kube-api-access-bc5nj\") pod \"community-operators-dfwdt\" (UID: \"8e28b4df-dac4-4701-a4c0-d714baadcdcf\") " pod="openshift-marketplace/community-operators-dfwdt" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.736520 4929 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e28b4df-dac4-4701-a4c0-d714baadcdcf-utilities\") pod \"community-operators-dfwdt\" (UID: \"8e28b4df-dac4-4701-a4c0-d714baadcdcf\") " pod="openshift-marketplace/community-operators-dfwdt" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.838113 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e28b4df-dac4-4701-a4c0-d714baadcdcf-catalog-content\") pod \"community-operators-dfwdt\" (UID: \"8e28b4df-dac4-4701-a4c0-d714baadcdcf\") " pod="openshift-marketplace/community-operators-dfwdt" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.838177 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bc5nj\" (UniqueName: \"kubernetes.io/projected/8e28b4df-dac4-4701-a4c0-d714baadcdcf-kube-api-access-bc5nj\") pod \"community-operators-dfwdt\" (UID: \"8e28b4df-dac4-4701-a4c0-d714baadcdcf\") " pod="openshift-marketplace/community-operators-dfwdt" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.838197 4929 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e28b4df-dac4-4701-a4c0-d714baadcdcf-utilities\") pod \"community-operators-dfwdt\" (UID: \"8e28b4df-dac4-4701-a4c0-d714baadcdcf\") " pod="openshift-marketplace/community-operators-dfwdt" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.839009 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e28b4df-dac4-4701-a4c0-d714baadcdcf-utilities\") pod \"community-operators-dfwdt\" (UID: \"8e28b4df-dac4-4701-a4c0-d714baadcdcf\") " pod="openshift-marketplace/community-operators-dfwdt" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.839274 4929 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e28b4df-dac4-4701-a4c0-d714baadcdcf-catalog-content\") pod \"community-operators-dfwdt\" (UID: \"8e28b4df-dac4-4701-a4c0-d714baadcdcf\") " pod="openshift-marketplace/community-operators-dfwdt" Nov 22 08:43:27 crc kubenswrapper[4929]: I1122 08:43:27.859308 4929 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-bc5nj\" (UniqueName: \"kubernetes.io/projected/8e28b4df-dac4-4701-a4c0-d714baadcdcf-kube-api-access-bc5nj\") pod \"community-operators-dfwdt\" (UID: \"8e28b4df-dac4-4701-a4c0-d714baadcdcf\") " pod="openshift-marketplace/community-operators-dfwdt" Nov 22 08:43:28 crc kubenswrapper[4929]: I1122 08:43:28.016010 4929 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dfwdt" Nov 22 08:43:28 crc kubenswrapper[4929]: I1122 08:43:28.519504 4929 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dfwdt"] Nov 22 08:43:29 crc kubenswrapper[4929]: I1122 08:43:29.386162 4929 generic.go:334] "Generic (PLEG): container finished" podID="8e28b4df-dac4-4701-a4c0-d714baadcdcf" containerID="6e3e0f2f8ae53291c1d1a06c7c32bf99902eaf8a4ef1a87b8a09216a2d1b573f" exitCode=0 Nov 22 08:43:29 crc kubenswrapper[4929]: I1122 08:43:29.386413 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfwdt" event={"ID":"8e28b4df-dac4-4701-a4c0-d714baadcdcf","Type":"ContainerDied","Data":"6e3e0f2f8ae53291c1d1a06c7c32bf99902eaf8a4ef1a87b8a09216a2d1b573f"} Nov 22 08:43:29 crc kubenswrapper[4929]: I1122 08:43:29.387658 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfwdt" event={"ID":"8e28b4df-dac4-4701-a4c0-d714baadcdcf","Type":"ContainerStarted","Data":"cf14f049146832d2293fe1d8e0fd91388375189d6a002718e691d044ec0154d0"} Nov 22 08:43:29 crc kubenswrapper[4929]: I1122 08:43:29.389183 4929 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 08:43:31 crc kubenswrapper[4929]: I1122 08:43:31.404511 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfwdt" event={"ID":"8e28b4df-dac4-4701-a4c0-d714baadcdcf","Type":"ContainerStarted","Data":"16c9dbf5ec59baaf7bbf1baa6ddfd28e11a90e8b30bcb27c006cfa54ed4d703f"} Nov 22 08:43:31 crc kubenswrapper[4929]: I1122 08:43:31.947387 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95" Nov 22 08:43:31 crc kubenswrapper[4929]: E1122 08:43:31.948250 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055" Nov 22 08:43:34 crc kubenswrapper[4929]: I1122 08:43:34.435072 4929 generic.go:334] "Generic (PLEG): container finished" podID="8e28b4df-dac4-4701-a4c0-d714baadcdcf" containerID="16c9dbf5ec59baaf7bbf1baa6ddfd28e11a90e8b30bcb27c006cfa54ed4d703f" exitCode=0 Nov 22 08:43:34 crc kubenswrapper[4929]: I1122 08:43:34.435155 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfwdt" event={"ID":"8e28b4df-dac4-4701-a4c0-d714baadcdcf","Type":"ContainerDied","Data":"16c9dbf5ec59baaf7bbf1baa6ddfd28e11a90e8b30bcb27c006cfa54ed4d703f"} Nov 22 08:43:36 crc kubenswrapper[4929]: I1122 08:43:36.455289 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfwdt" 
Nov 22 08:43:36 crc kubenswrapper[4929]: I1122 08:43:36.489683 4929 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-dfwdt" podStartSLOduration=3.6139640870000003 podStartE2EDuration="9.489661362s" podCreationTimestamp="2025-11-22 08:43:27 +0000 UTC" firstStartedPulling="2025-11-22 08:43:29.388901326 +0000 UTC m=+5546.498355339" lastFinishedPulling="2025-11-22 08:43:35.264598601 +0000 UTC m=+5552.374052614" observedRunningTime="2025-11-22 08:43:36.479822666 +0000 UTC m=+5553.589276679" watchObservedRunningTime="2025-11-22 08:43:36.489661362 +0000 UTC m=+5553.599115375"
Nov 22 08:43:38 crc kubenswrapper[4929]: I1122 08:43:38.016810 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-dfwdt"
Nov 22 08:43:38 crc kubenswrapper[4929]: I1122 08:43:38.016870 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-dfwdt"
Nov 22 08:43:38 crc kubenswrapper[4929]: I1122 08:43:38.068588 4929 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-dfwdt"
Nov 22 08:43:45 crc kubenswrapper[4929]: I1122 08:43:45.947776 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95"
Nov 22 08:43:45 crc kubenswrapper[4929]: E1122 08:43:45.948491 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 08:43:48 crc kubenswrapper[4929]: I1122 08:43:48.079588 4929 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-dfwdt"
Nov 22 08:43:48 crc kubenswrapper[4929]: I1122 08:43:48.141447 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dfwdt"]
Nov 22 08:43:48 crc kubenswrapper[4929]: I1122 08:43:48.564529 4929 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-dfwdt" podUID="8e28b4df-dac4-4701-a4c0-d714baadcdcf" containerName="registry-server" containerID="cri-o://af8522e394cde97a248ec5a0b35c8450c5df6c24d3a274337bb13eb6a13b1af4" gracePeriod=2
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.071197 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dfwdt"
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.199981 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bc5nj\" (UniqueName: \"kubernetes.io/projected/8e28b4df-dac4-4701-a4c0-d714baadcdcf-kube-api-access-bc5nj\") pod \"8e28b4df-dac4-4701-a4c0-d714baadcdcf\" (UID: \"8e28b4df-dac4-4701-a4c0-d714baadcdcf\") "
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.200548 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e28b4df-dac4-4701-a4c0-d714baadcdcf-catalog-content\") pod \"8e28b4df-dac4-4701-a4c0-d714baadcdcf\" (UID: \"8e28b4df-dac4-4701-a4c0-d714baadcdcf\") "
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.200671 4929 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e28b4df-dac4-4701-a4c0-d714baadcdcf-utilities\") pod \"8e28b4df-dac4-4701-a4c0-d714baadcdcf\" (UID: \"8e28b4df-dac4-4701-a4c0-d714baadcdcf\") "
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.202675 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e28b4df-dac4-4701-a4c0-d714baadcdcf-utilities" (OuterVolumeSpecName: "utilities") pod "8e28b4df-dac4-4701-a4c0-d714baadcdcf" (UID: "8e28b4df-dac4-4701-a4c0-d714baadcdcf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.210568 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e28b4df-dac4-4701-a4c0-d714baadcdcf-kube-api-access-bc5nj" (OuterVolumeSpecName: "kube-api-access-bc5nj") pod "8e28b4df-dac4-4701-a4c0-d714baadcdcf" (UID: "8e28b4df-dac4-4701-a4c0-d714baadcdcf"). InnerVolumeSpecName "kube-api-access-bc5nj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.270587 4929 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e28b4df-dac4-4701-a4c0-d714baadcdcf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8e28b4df-dac4-4701-a4c0-d714baadcdcf" (UID: "8e28b4df-dac4-4701-a4c0-d714baadcdcf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.303596 4929 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bc5nj\" (UniqueName: \"kubernetes.io/projected/8e28b4df-dac4-4701-a4c0-d714baadcdcf-kube-api-access-bc5nj\") on node \"crc\" DevicePath \"\""
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.303652 4929 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e28b4df-dac4-4701-a4c0-d714baadcdcf-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.303667 4929 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e28b4df-dac4-4701-a4c0-d714baadcdcf-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.576072 4929 generic.go:334] "Generic (PLEG): container finished" podID="8e28b4df-dac4-4701-a4c0-d714baadcdcf" containerID="af8522e394cde97a248ec5a0b35c8450c5df6c24d3a274337bb13eb6a13b1af4" exitCode=0
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.576139 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfwdt" event={"ID":"8e28b4df-dac4-4701-a4c0-d714baadcdcf","Type":"ContainerDied","Data":"af8522e394cde97a248ec5a0b35c8450c5df6c24d3a274337bb13eb6a13b1af4"}
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.576244 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dfwdt" event={"ID":"8e28b4df-dac4-4701-a4c0-d714baadcdcf","Type":"ContainerDied","Data":"cf14f049146832d2293fe1d8e0fd91388375189d6a002718e691d044ec0154d0"}
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.576269 4929 scope.go:117] "RemoveContainer" containerID="af8522e394cde97a248ec5a0b35c8450c5df6c24d3a274337bb13eb6a13b1af4"
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.576285 4929 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dfwdt"
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.606354 4929 scope.go:117] "RemoveContainer" containerID="16c9dbf5ec59baaf7bbf1baa6ddfd28e11a90e8b30bcb27c006cfa54ed4d703f"
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.611417 4929 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dfwdt"]
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.620219 4929 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-dfwdt"]
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.656911 4929 scope.go:117] "RemoveContainer" containerID="6e3e0f2f8ae53291c1d1a06c7c32bf99902eaf8a4ef1a87b8a09216a2d1b573f"
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.691019 4929 scope.go:117] "RemoveContainer" containerID="af8522e394cde97a248ec5a0b35c8450c5df6c24d3a274337bb13eb6a13b1af4"
Nov 22 08:43:49 crc kubenswrapper[4929]: E1122 08:43:49.691706 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af8522e394cde97a248ec5a0b35c8450c5df6c24d3a274337bb13eb6a13b1af4\": container with ID starting with af8522e394cde97a248ec5a0b35c8450c5df6c24d3a274337bb13eb6a13b1af4 not found: ID does not exist" containerID="af8522e394cde97a248ec5a0b35c8450c5df6c24d3a274337bb13eb6a13b1af4"
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.691743 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af8522e394cde97a248ec5a0b35c8450c5df6c24d3a274337bb13eb6a13b1af4"} err="failed to get container status \"af8522e394cde97a248ec5a0b35c8450c5df6c24d3a274337bb13eb6a13b1af4\": rpc error: code = NotFound desc = could not find container \"af8522e394cde97a248ec5a0b35c8450c5df6c24d3a274337bb13eb6a13b1af4\": container with ID starting with af8522e394cde97a248ec5a0b35c8450c5df6c24d3a274337bb13eb6a13b1af4 not found: ID does not exist"
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.691786 4929 scope.go:117] "RemoveContainer" containerID="16c9dbf5ec59baaf7bbf1baa6ddfd28e11a90e8b30bcb27c006cfa54ed4d703f"
Nov 22 08:43:49 crc kubenswrapper[4929]: E1122 08:43:49.692180 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16c9dbf5ec59baaf7bbf1baa6ddfd28e11a90e8b30bcb27c006cfa54ed4d703f\": container with ID starting with 16c9dbf5ec59baaf7bbf1baa6ddfd28e11a90e8b30bcb27c006cfa54ed4d703f not found: ID does not exist" containerID="16c9dbf5ec59baaf7bbf1baa6ddfd28e11a90e8b30bcb27c006cfa54ed4d703f"
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.692230 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16c9dbf5ec59baaf7bbf1baa6ddfd28e11a90e8b30bcb27c006cfa54ed4d703f"} err="failed to get container status \"16c9dbf5ec59baaf7bbf1baa6ddfd28e11a90e8b30bcb27c006cfa54ed4d703f\": rpc error: code = NotFound desc = could not find container \"16c9dbf5ec59baaf7bbf1baa6ddfd28e11a90e8b30bcb27c006cfa54ed4d703f\": container with ID starting with 16c9dbf5ec59baaf7bbf1baa6ddfd28e11a90e8b30bcb27c006cfa54ed4d703f not found: ID does not exist"
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.692258 4929 scope.go:117] "RemoveContainer" containerID="6e3e0f2f8ae53291c1d1a06c7c32bf99902eaf8a4ef1a87b8a09216a2d1b573f"
Nov 22 08:43:49 crc kubenswrapper[4929]: E1122 08:43:49.692580 4929 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e3e0f2f8ae53291c1d1a06c7c32bf99902eaf8a4ef1a87b8a09216a2d1b573f\": container with ID starting with 6e3e0f2f8ae53291c1d1a06c7c32bf99902eaf8a4ef1a87b8a09216a2d1b573f not found: ID does not exist" containerID="6e3e0f2f8ae53291c1d1a06c7c32bf99902eaf8a4ef1a87b8a09216a2d1b573f"
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.692603 4929 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e3e0f2f8ae53291c1d1a06c7c32bf99902eaf8a4ef1a87b8a09216a2d1b573f"} err="failed to get container status \"6e3e0f2f8ae53291c1d1a06c7c32bf99902eaf8a4ef1a87b8a09216a2d1b573f\": rpc error: code = NotFound desc = could not find container \"6e3e0f2f8ae53291c1d1a06c7c32bf99902eaf8a4ef1a87b8a09216a2d1b573f\": container with ID starting with 6e3e0f2f8ae53291c1d1a06c7c32bf99902eaf8a4ef1a87b8a09216a2d1b573f not found: ID does not exist"
Nov 22 08:43:49 crc kubenswrapper[4929]: I1122 08:43:49.958587 4929 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e28b4df-dac4-4701-a4c0-d714baadcdcf" path="/var/lib/kubelet/pods/8e28b4df-dac4-4701-a4c0-d714baadcdcf/volumes"
Nov 22 08:43:58 crc kubenswrapper[4929]: I1122 08:43:58.947636 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95"
Nov 22 08:43:58 crc kubenswrapper[4929]: E1122 08:43:58.948435 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 08:44:13 crc kubenswrapper[4929]: I1122 08:44:13.954070 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95"
Nov 22 08:44:13 crc kubenswrapper[4929]: E1122 08:44:13.955618 4929 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dssfx_openshift-machine-config-operator(470531cb-120c-48d9-80e1-adf074cf3055)\"" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" podUID="470531cb-120c-48d9-80e1-adf074cf3055"
Nov 22 08:44:26 crc kubenswrapper[4929]: I1122 08:44:26.950316 4929 scope.go:117] "RemoveContainer" containerID="f91f6764425145ed16b4cbf3da18d72c4cb796893305ba4aa182cb05e2166d95"
Nov 22 08:44:27 crc kubenswrapper[4929]: I1122 08:44:27.900522 4929 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dssfx" event={"ID":"470531cb-120c-48d9-80e1-adf074cf3055","Type":"ContainerStarted","Data":"921f7daa3ca380d503fc0d04ce5024280d1f5aa78e7512337435c8efc3fa5bab"}